Diffstat (limited to 'tools')
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/loongarch/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/x86/kcpuid/Makefile4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst6
-rw-r--r--tools/bpf/bpftool/Makefile3
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool3
-rw-r--r--tools/bpf/bpftool/btf.c193
-rw-r--r--tools/bpf/bpftool/cgroup.c40
-rw-r--r--tools/bpf/bpftool/common.c2
-rw-r--r--tools/bpf/bpftool/gen.c94
-rw-r--r--tools/bpf/bpftool/prog.c4
-rw-r--r--tools/bpf/bpftool/skeleton/pid_iter.bpf.c7
-rw-r--r--tools/bpf/bpftool/skeleton/profiler.bpf.c14
-rw-r--r--tools/bpf/resolve_btfids/main.c8
-rw-r--r--tools/build/feature/Makefile2
-rw-r--r--tools/build/feature/test-libtracefs.c2
-rwxr-xr-xtools/gpio/gpio-sloppy-logic-analyzer.sh246
-rw-r--r--tools/include/linux/compiler.h4
-rw-r--r--tools/include/linux/mm.h1
-rw-r--r--tools/include/linux/numa.h5
-rw-r--r--tools/include/linux/poison.h7
-rw-r--r--tools/include/nolibc/stdint.h19
-rw-r--r--tools/include/nolibc/stdio.h10
-rw-r--r--tools/include/nolibc/stdlib.h109
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/linux/bpf.h17
-rw-r--r--tools/include/uapi/linux/fs.h552
-rw-r--r--tools/include/uapi/linux/kvm.h10
-rw-r--r--tools/include/uapi/linux/prctl.h331
-rw-r--r--tools/lib/api/io.h69
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/btf.c696
-rw-r--r--tools/lib/bpf/btf.h36
-rw-r--r--tools/lib/bpf/btf_iter.c177
-rw-r--r--tools/lib/bpf/btf_relocate.c519
-rw-r--r--tools/lib/bpf/libbpf.c136
-rw-r--r--tools/lib/bpf/libbpf.h23
-rw-r--r--tools/lib/bpf/libbpf.map4
-rw-r--r--tools/lib/bpf/libbpf_internal.h39
-rw-r--r--tools/lib/bpf/linker.c69
-rw-r--r--tools/lib/list_sort.c10
-rw-r--r--tools/lib/perf/include/perf/event.h6
-rw-r--r--tools/memory-model/Documentation/README4
-rw-r--r--tools/memory-model/Documentation/access-marking.txt34
-rw-r--r--tools/memory-model/lock.cat62
-rw-r--r--tools/mm/Makefile2
-rw-r--r--tools/mm/thp_swap_allocator_test.c234
-rw-r--r--tools/net/ynl/Makefile6
-rw-r--r--tools/net/ynl/Makefile.deps4
-rw-r--r--tools/net/ynl/lib/Makefile4
-rw-r--r--tools/net/ynl/lib/ynl-priv.h30
-rw-r--r--tools/net/ynl/lib/ynl.c10
-rw-r--r--tools/net/ynl/lib/ynl.h2
-rw-r--r--tools/net/ynl/lib/ynl.py2
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py58
-rwxr-xr-xtools/net/ynl/ynl-gen-rst.py13
-rw-r--r--tools/objtool/Documentation/objtool.txt19
-rw-r--r--tools/objtool/arch/x86/decode.c8
-rw-r--r--tools/objtool/arch/x86/special.c23
-rw-r--r--tools/objtool/builtin-check.c4
-rw-r--r--tools/objtool/check.c2
-rw-r--r--tools/objtool/noreturns.h4
-rw-r--r--tools/objtool/special.c16
-rw-r--r--tools/perf/Build14
-rw-r--r--tools/perf/Documentation/perf-amd-ibs.txt189
-rw-r--r--tools/perf/Documentation/perf-kwork.txt4
-rw-r--r--tools/perf/Documentation/perf-lock.txt4
-rw-r--r--tools/perf/Documentation/perf-mem.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-sched.txt21
-rw-r--r--tools/perf/Documentation/perf-top.txt4
-rw-r--r--tools/perf/Documentation/perf.txt3
-rw-r--r--tools/perf/Makefile.config46
-rw-r--r--tools/perf/Makefile.perf85
-rw-r--r--tools/perf/arch/Build5
-rw-r--r--tools/perf/arch/arm/Build4
-rw-r--r--tools/perf/arch/arm/tests/Build8
-rw-r--r--tools/perf/arch/arm/util/Build10
-rw-r--r--tools/perf/arch/arm/util/pmu.c12
-rw-r--r--tools/perf/arch/arm64/Build4
-rw-r--r--tools/perf/arch/arm64/tests/Build8
-rw-r--r--tools/perf/arch/arm64/util/Build20
-rw-r--r--tools/perf/arch/csky/Build2
-rw-r--r--tools/perf/arch/csky/util/Build6
-rw-r--r--tools/perf/arch/loongarch/Build2
-rw-r--r--tools/perf/arch/loongarch/Makefile1
-rw-r--r--tools/perf/arch/loongarch/util/Build10
-rw-r--r--tools/perf/arch/loongarch/util/header.c96
-rw-r--r--tools/perf/arch/loongarch/util/kvm-stat.c139
-rw-r--r--tools/perf/arch/mips/Build2
-rw-r--r--tools/perf/arch/mips/util/Build6
-rw-r--r--tools/perf/arch/powerpc/Build4
-rw-r--r--tools/perf/arch/powerpc/tests/Build6
-rw-r--r--tools/perf/arch/powerpc/util/Build24
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c8
-rw-r--r--tools/perf/arch/riscv/Build2
-rw-r--r--tools/perf/arch/riscv/Makefile1
-rw-r--r--tools/perf/arch/riscv/util/Build9
-rw-r--r--tools/perf/arch/riscv/util/kvm-stat.c78
-rw-r--r--tools/perf/arch/riscv/util/riscv_exception_types.h35
-rw-r--r--tools/perf/arch/s390/Build2
-rw-r--r--tools/perf/arch/s390/util/Build16
-rw-r--r--tools/perf/arch/sh/Build2
-rw-r--r--tools/perf/arch/sh/util/Build2
-rw-r--r--tools/perf/arch/sparc/Build2
-rw-r--r--tools/perf/arch/sparc/util/Build2
-rw-r--r--tools/perf/arch/x86/Build6
-rwxr-xr-xtools/perf/arch/x86/entry/syscalls/syscalltbl.sh4
-rw-r--r--tools/perf/arch/x86/tests/Build20
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-32.c116
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-64.c1026
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-src.c597
-rw-r--r--tools/perf/arch/x86/util/Build42
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c15
-rw-r--r--tools/perf/arch/xtensa/Build2
-rw-r--r--tools/perf/bench/Build46
-rw-r--r--tools/perf/bench/epoll-ctl.c2
-rw-r--r--tools/perf/bench/epoll-wait.c2
-rw-r--r--tools/perf/bench/futex-hash.c2
-rw-r--r--tools/perf/bench/futex-lock-pi.c2
-rw-r--r--tools/perf/bench/futex-requeue.c2
-rw-r--r--tools/perf/bench/futex-wake-parallel.c4
-rw-r--r--tools/perf/bench/futex-wake.c2
-rw-r--r--tools/perf/builtin-annotate.c4
-rw-r--r--tools/perf/builtin-list.c23
-rw-r--r--tools/perf/builtin-lock.c27
-rw-r--r--tools/perf/builtin-record.c32
-rw-r--r--tools/perf/builtin-report.c11
-rw-r--r--tools/perf/builtin-sched.c189
-rw-r--r--tools/perf/builtin-stat.c399
-rw-r--r--tools/perf/builtin-top.c9
-rw-r--r--tools/perf/builtin-trace.c49
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx93/sys/ddrc.json9
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx93/sys/metrics.json26
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx95/sys/ddrc.json9
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx95/sys/metrics.json874
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json988
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/cache.json184
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/floating-point.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/frontend.json56
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/memory.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json23
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/other.json37
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/pipeline.json214
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json25
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json658
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/cache.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/frontend.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/memory.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/metricgroups.json21
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/other.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json80
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json25
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/cache.json93
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/floating-point.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/memory.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/other.json56
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json80
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json275
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/counter.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json240
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json137
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json80
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json76
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/counter.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/frontend.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/memory.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json137
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json382
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json70
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json62
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json322
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json57
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json128
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json88
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/counter.json57
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/frontend.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json58
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json137
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json399
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json454
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json62
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json326
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json57
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/cache.json1245
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json310
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/counter.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/memory.json743
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/other.json168
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json104
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json2293
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json2536
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json703
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json985
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/cache.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/other.json61
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json159
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/counter.json82
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json2186
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/metricgroups.json137
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/other.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json133
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json1288
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json110
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json1427
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json743
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json742
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json103
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/floating-point.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/other.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/cache.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/memory.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/other.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/cache.json97
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/counter.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/floating-point.json54
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/frontend.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json849
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/memory.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/metricgroups.json23
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/other.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/pipeline.json97
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json267
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json181
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json66
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-power.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/cache.json825
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/counter.json77
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/floating-point.json242
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/frontend.json469
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/memory.json175
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/other.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json1009
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json3674
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-cxl.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json1849
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json1901
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json449
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-power.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json159
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json94
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/counter.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/frontend.json29
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json66
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json130
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json33
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/uncore-other.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json97
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/counter.json57
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/floating-point.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/frontend.json29
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json114
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json67
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json130
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json398
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json448
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json59
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json325
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json62
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/cache.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/counter.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/floating-point.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/frontend.json41
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json308
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/memory.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/metricgroups.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/other.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/pipeline.json94
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json34
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/uncore-other.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/cache.json106
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/counter.json57
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/floating-point.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/frontend.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json340
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/memory.json45
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/other.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/pipeline.json92
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json2149
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json3344
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json1829
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json338
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json51
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json104
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/counter.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/frontend.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/memory.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json126
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json25
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/cache.json118
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/counter.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/floating-point.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/frontend.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/memory.json41
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json126
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json349
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json385
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json61
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json198
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json123
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/counter.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/floating-point.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/frontend.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/memory.json35
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/other.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json127
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json205
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json207
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json51
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json213
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/counter.json37
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/frontend.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/memory.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json45
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json421
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/cache.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/frontend.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/memory.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/other.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv34
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json223
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json86
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json69
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json62
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json300
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json37
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/cache.json320
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/frontend.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/memory.json67
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/other.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/cache.json315
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/frontend.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/memory.json67
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/other.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/cache.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/counter.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/frontend.json41
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/memory.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/other.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json94
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json308
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/counter.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/memory.json37
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json128
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json25
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json161
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/counter.json82
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/other.json48
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json133
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json411
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json1244
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json110
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json1427
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json679
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json742
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/cache.json97
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/counter.json77
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/floating-point.json54
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/frontend.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/memory.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/metricgroups.json23
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/other.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json97
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json927
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json549
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-cxl.json21
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json267
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json267
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json66
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-power.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json77
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/floating-point.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/memory.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/pipeline.json34
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json250
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/counter.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/floating-point.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json131
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/metricgroups.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json103
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json196
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json23
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore-other.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/counter.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json115
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json104
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json310
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json2274
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json2521
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json703
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json804
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/cache.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/counter.json47
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/frontend.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/other.json61
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json1548
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json1403
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json1743
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json103
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json51
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/cache.json73
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/counter.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/frontend.json41
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json13
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/other.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json95
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json198
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json282
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json69
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/other.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json111
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json21
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json321
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json67
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/other.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json111
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/cache.json320
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/counter.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/floating-point.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/frontend.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/memory.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/other.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/pipeline.json111
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json21
-rwxr-xr-xtools/perf/pmu-events/jevents.py1
-rw-r--r--tools/perf/scripts/Build4
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Build2
-rw-r--r--tools/perf/scripts/python/netdev-times.py3
-rwxr-xr-xtools/perf/scripts/python/parallel-perf.py3
-rw-r--r--tools/perf/tests/Build140
-rw-r--r--tools/perf/tests/pmu.c199
-rwxr-xr-xtools/perf/tests/shell/annotate.sh10
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_kernel.sh31
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh36
-rwxr-xr-xtools/perf/tests/shell/test_arm_callgraph_fp.sh27
-rwxr-xr-xtools/perf/tests/shell/test_uprobe_from_different_cu.sh2
-rw-r--r--tools/perf/tests/workloads/Build12
-rw-r--r--tools/perf/tests/workloads/leafloop.c20
-rw-r--r--tools/perf/ui/Build18
-rw-r--r--tools/perf/ui/browsers/Build14
-rw-r--r--tools/perf/ui/gtk/annotate.c5
-rw-r--r--tools/perf/ui/hist.c144
-rw-r--r--tools/perf/ui/stdio/hist.c5
-rw-r--r--tools/perf/ui/tui/Build8
-rw-r--r--tools/perf/util/Build394
-rw-r--r--tools/perf/util/arm-spe-decoder/Build2
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c23
-rw-r--r--tools/perf/util/bpf-filter.c33
-rw-r--r--tools/perf/util/bpf-filter.h5
-rw-r--r--tools/perf/util/bpf-filter.l66
-rw-r--r--tools/perf/util/bpf-filter.y7
-rw-r--r--tools/perf/util/bpf_skel/sample-filter.h40
-rw-r--r--tools/perf/util/bpf_skel/sample_filter.bpf.c73
-rw-r--r--tools/perf/util/comm.c29
-rw-r--r--tools/perf/util/cs-etm-decoder/Build2
-rw-r--r--tools/perf/util/cs-etm.c10
-rw-r--r--tools/perf/util/disasm.c10
-rw-r--r--tools/perf/util/dso.c12
-rw-r--r--tools/perf/util/dso.h14
-rw-r--r--tools/perf/util/dsos.c31
-rw-r--r--tools/perf/util/events_stats.h3
-rw-r--r--tools/perf/util/evsel.c239
-rw-r--r--tools/perf/util/evsel.h14
-rw-r--r--tools/perf/util/expr.c4
-rw-r--r--tools/perf/util/genelf.c5
-rw-r--r--tools/perf/util/hisi-ptt-decoder/Build2
-rw-r--r--tools/perf/util/hisi-ptt.c5
-rw-r--r--tools/perf/util/hist.c6
-rw-r--r--tools/perf/util/hist.h3
-rw-r--r--tools/perf/util/intel-pt-decoder/Build2
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c9
-rw-r--r--tools/perf/util/maps.c113
-rw-r--r--tools/perf/util/mem-events.c16
-rw-r--r--tools/perf/util/parse-events.c10
-rw-r--r--tools/perf/util/perf-regs-arch/Build18
-rw-r--r--tools/perf/util/pmu.c54
-rw-r--r--tools/perf/util/pmus.c72
-rw-r--r--tools/perf/util/pmus.h7
-rw-r--r--tools/perf/util/python-ext-sources53
-rw-r--r--tools/perf/util/python.c271
-rw-r--r--tools/perf/util/scripting-engines/Build4
-rw-r--r--tools/perf/util/session.c25
-rw-r--r--tools/perf/util/session.h3
-rw-r--r--tools/perf/util/setup.py33
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/srcline.c14
-rw-r--r--tools/perf/util/stat-display.c20
-rw-r--r--tools/perf/util/stat-shadow.c7
-rw-r--r--tools/perf/util/symbol.c23
-rw-r--r--tools/perf/util/symbol_conf.h3
-rw-r--r--tools/perf/util/syscalltbl.c7
-rw-r--r--tools/perf/util/syscalltbl.h1
-rw-r--r--tools/perf/util/unwind-libdw.c12
-rw-r--r--tools/perf/util/unwind-libunwind-local.c23
-rw-r--r--tools/power/cpupower/Makefile47
-rw-r--r--tools/power/cpupower/README160
-rw-r--r--tools/power/cpupower/bench/Makefile5
-rw-r--r--tools/power/cpupower/man/cpupower-monitor.113
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c2
-rwxr-xr-xtools/power/pm-graph/bootgraph.py16
-rwxr-xr-xtools/power/pm-graph/sleepgraph.py1098
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst-core.c6
-rwxr-xr-xtools/rcu/rcu-updaters.sh52
-rw-r--r--tools/testing/memblock/tests/basic_api.c314
-rw-r--r--tools/testing/memblock/tests/common.c8
-rw-r--r--tools/testing/memblock/tests/common.h4
-rw-r--r--tools/testing/nvdimm/test/iomap.c1
-rw-r--r--tools/testing/nvdimm/test/ndtest.c1
-rw-r--r--tools/testing/nvdimm/test/nfit.c1
-rw-r--r--tools/testing/radix-tree/idr-test.c1
-rw-r--r--tools/testing/radix-tree/maple.c1
-rw-r--r--tools/testing/radix-tree/xarray.c1
-rw-r--r--tools/testing/selftests/Makefile5
-rw-r--r--tools/testing/selftests/alsa/mixer-test.c143
-rw-r--r--tools/testing/selftests/alsa/pcm-test.c70
-rw-r--r--tools/testing/selftests/arm64/abi/ptrace.c2
-rw-r--r--tools/testing/selftests/arm64/fp/.gitignore1
-rw-r--r--tools/testing/selftests/arm64/fp/Makefile1
-rw-r--r--tools/testing/selftests/arm64/fp/fp-stress.c26
-rw-r--r--tools/testing/selftests/arm64/fp/kernel-test.c324
-rw-r--r--tools/testing/selftests/arm64/tags/Makefile1
-rwxr-xr-xtools/testing/selftests/arm64/tags/run_tags_test.sh12
-rw-r--r--tools/testing/selftests/arm64/tags/tags_test.c10
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch641
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x4
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_common.h2
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h32
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h2
-rw-r--r--tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c4
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c200
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h10
-rw-r--r--tools/testing/selftests/bpf/config17
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c130
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_atomics.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_cookie.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_nf.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c247
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_distill.c552
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_field_iter.c161
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cpumask.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/find_vma.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_list.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rbtree.c47
-rw-r--r--tools/testing/selftests/bpf/prog_tests/send_signal.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_links.c61
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c57
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_lockup.c91
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tracing_struct.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c385
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c186
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c168
-rw-r--r--tools/testing/selftests/bpf/progs/arena_atomics.c143
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab.c17
-rw-r--r--tools/testing/selftests/bpf/progs/arena_list.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c36
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c6
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c6
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h15
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_success.c171
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_bench.c10
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_sanity.c16
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c30
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_ip_test.c7
-rw-r--r--tools/testing/selftests/bpf/progs/ip_check_defrag.c10
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c37
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session.c3
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c2
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.c47
-rw-r--r--tools/testing/selftests/bpf/progs/map_percpu_stats.c2
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_common.h2
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_failure.c8
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_success.c8
-rw-r--r--tools/testing/selftests/bpf/progs/netif_receive_skb.c5
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h5
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree.c77
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c2
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c4
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c5
-rw-r--r--tools/testing/selftests/bpf/progs/skb_pkt_end.c11
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_detach.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_ma.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf.c109
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.h20
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_dtime.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h2
-rw-r--r--tools/testing/selftests/bpf/progs/timer_lockup.c87
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct.c54
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct_many_args.c95
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_syscall.c15
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c17
-rw-r--r--tools/testing/selftests/bpf/progs/uretprobe_stack.c96
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_fail.c22
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena.c1
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena_large.c1
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bits_iter.c153
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c236
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c6
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subprog_precision.c2
-rw-r--r--tools/testing/selftests/bpf/progs/wq.c19
-rw-r--r--tools/testing/selftests/bpf/progs/wq_failures.c4
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_flowtable.c148
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c1
-rw-r--r--tools/testing/selftests/bpf/progs/xfrm_info.c1
-rw-r--r--tools/testing/selftests/bpf/test_loader.c115
-rw-r--r--tools/testing/selftests/bpf/test_progs.h9
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c137
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c33
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c5
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c13
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c15
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c22
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c40
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h2
-rw-r--r--tools/testing/selftests/breakpoints/step_after_suspend_test.c1
-rw-r--r--tools/testing/selftests/cgroup/.gitignore11
-rw-r--r--tools/testing/selftests/cgroup/Makefile25
-rw-r--r--tools/testing/selftests/cgroup/config1
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh75
-rw-r--r--tools/testing/selftests/cgroup/test_pids.c178
-rw-r--r--tools/testing/selftests/damon/Makefile3
-rw-r--r--tools/testing/selftests/damon/_damon_sysfs.py65
-rw-r--r--tools/testing/selftests/damon/access_memory.c2
-rw-r--r--tools/testing/selftests/damon/access_memory_even.c42
-rw-r--r--tools/testing/selftests/damon/damon_nr_regions.py145
-rw-r--r--tools/testing/selftests/damon/damos_tried_regions.py65
-rw-r--r--tools/testing/selftests/devices/Makefile4
-rw-r--r--tools/testing/selftests/devices/error_logs/Makefile3
-rwxr-xr-xtools/testing/selftests/devices/error_logs/test_device_error_logs.py85
-rw-r--r--tools/testing/selftests/devices/probe/Makefile4
-rw-r--r--tools/testing/selftests/devices/probe/boards/Dell Inc.,XPS 13 9300.yaml (renamed from tools/testing/selftests/devices/boards/Dell Inc.,XPS 13 9300.yaml)0
-rw-r--r--tools/testing/selftests/devices/probe/boards/google,spherion.yaml (renamed from tools/testing/selftests/devices/boards/google,spherion.yaml)4
-rwxr-xr-xtools/testing/selftests/devices/probe/test_discoverable_devices.py (renamed from tools/testing/selftests/devices/test_discoverable_devices.py)44
-rw-r--r--tools/testing/selftests/dma/dma_map_benchmark.c1
-rw-r--r--tools/testing/selftests/drivers/dma-buf/udmabuf.c214
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile1
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_ctx.py522
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py19
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/load.py37
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh71
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh18
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh55
-rw-r--r--tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile6
-rwxr-xr-xtools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh494
-rw-r--r--tools/testing/selftests/exec/Makefile20
-rw-r--r--tools/testing/selftests/exec/load_address.c67
-rw-r--r--tools/testing/selftests/filesystems/eventfd/eventfd_test.c136
-rw-r--r--tools/testing/selftests/filesystems/statmount/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount.h46
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test.c144
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test_ns.c364
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc103
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc29
-rw-r--r--tools/testing/selftests/futex/functional/Makefile2
-rw-r--r--tools/testing/selftests/hid/hid_bpf.c426
-rw-r--r--tools/testing/selftests/hid/progs/hid.c392
-rw-r--r--tools/testing/selftests/hid/progs/hid_bpf_helpers.h46
-rw-r--r--tools/testing/selftests/intel_pstate/Makefile2
-rw-r--r--tools/testing/selftests/iommu/Makefile2
-rw-r--r--tools/testing/selftests/iommu/iommufd.c86
-rw-r--r--tools/testing/selftests/iommu/iommufd_fail_nth.c2
-rw-r--r--tools/testing/selftests/iommu/iommufd_utils.h92
-rw-r--r--tools/testing/selftests/kselftest.h8
-rw-r--r--tools/testing/selftests/kselftest/ksft.py (renamed from tools/testing/selftests/devices/ksft.py)0
-rw-r--r--tools/testing/selftests/kvm/Makefile4
-rw-r--r--tools/testing/selftests/kvm/aarch64/set_id_regs.c17
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/apic.h8
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h18
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c9
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c11
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c6
-rw-r--r--tools/testing/selftests/kvm/pre_fault_memory_test.c146
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c28
-rw-r--r--tools/testing/selftests/kvm/x86_64/apic_bus_clock_test.c194
-rw-r--r--tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c22
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_counters_test.c44
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c35
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c16
-rw-r--r--tools/testing/selftests/lib.mk11
-rwxr-xr-xtools/testing/selftests/livepatch/test-livepatch.sh138
-rwxr-xr-xtools/testing/selftests/livepatch/test-syscall.sh5
-rwxr-xr-xtools/testing/selftests/livepatch/test-sysfs.sh48
-rw-r--r--tools/testing/selftests/lkdtm/tests.txt1
-rw-r--r--tools/testing/selftests/mm/.gitignore1
-rw-r--r--tools/testing/selftests/mm/Makefile3
-rw-r--r--tools/testing/selftests/mm/hugepage-mremap.c2
-rw-r--r--tools/testing/selftests/mm/hugetlb-soft-offline.c228
-rw-r--r--tools/testing/selftests/mm/hugetlb_dio.c117
-rw-r--r--tools/testing/selftests/mm/ksm_functional_tests.c8
-rw-r--r--tools/testing/selftests/mm/memfd_secret.c14
-rw-r--r--tools/testing/selftests/mm/mkdirty.c8
-rw-r--r--tools/testing/selftests/mm/mlock2.h1
-rw-r--r--tools/testing/selftests/mm/mseal_helpers.h41
-rw-r--r--tools/testing/selftests/mm/mseal_test.c143
-rw-r--r--tools/testing/selftests/mm/pagemap_ioctl.c6
-rw-r--r--tools/testing/selftests/mm/protection_keys.c2
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh7
-rw-r--r--tools/testing/selftests/mm/seal_elf.c37
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c3
-rw-r--r--tools/testing/selftests/mm/thuge-gen.c15
-rw-r--r--tools/testing/selftests/mm/uffd-common.c4
-rw-r--r--tools/testing/selftests/mm/uffd-stress.c31
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c14
-rw-r--r--tools/testing/selftests/mm/va_high_addr_switch.c454
-rwxr-xr-xtools/testing/selftests/mm/va_high_addr_switch.sh4
-rw-r--r--tools/testing/selftests/mqueue/mq_perf_tests.c6
-rw-r--r--tools/testing/selftests/net/Makefile3
-rwxr-xr-xtools/testing/selftests/net/amt.sh2
-rw-r--r--tools/testing/selftests/net/config6
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh24
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile2
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh92
-rwxr-xr-xtools/testing/selftests/net/forwarding/min_max_mtu.sh283
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre.sh45
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bound.sh23
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh21
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh21
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh21
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh29
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh73
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_flower.sh43
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh65
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_lib.sh90
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_neigh.sh39
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_nh.sh35
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan.sh21
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh69
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh79
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_vlan.sh43
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_seed.sh333
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh8
-rwxr-xr-xtools/testing/selftests/net/hsr/hsr_ping.sh9
-rwxr-xr-xtools/testing/selftests/net/hsr/hsr_redbox.sh15
-rw-r--r--tools/testing/selftests/net/lib.sh55
-rw-r--r--tools/testing/selftests/net/lib/py/ksft.py65
-rw-r--r--tools/testing/selftests/net/lib/py/utils.py61
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh33
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_concat_range.sh76
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh37
-rwxr-xr-xtools/testing/selftests/net/netns-sysctl.sh40
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh169
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py641
-rw-r--r--tools/testing/selftests/net/openvswitch/settings1
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh145
-rw-r--r--tools/testing/selftests/net/tcp_ao/Makefile2
-rw-r--r--tools/testing/selftests/net/tcp_ao/self-connect.c18
-rw-r--r--tools/testing/selftests/net/udpgso.c15
-rwxr-xr-xtools/testing/selftests/net/udpgso.sh43
-rwxr-xr-xtools/testing/selftests/net/vrf_route_leaking.sh93
-rw-r--r--tools/testing/selftests/net/ynl.mk21
-rw-r--r--tools/testing/selftests/nolibc/Makefile2
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c109
-rwxr-xr-xtools/testing/selftests/nolibc/run-tests.sh9
-rw-r--r--tools/testing/selftests/proc/.gitignore2
-rw-r--r--tools/testing/selftests/proc/Makefile4
-rw-r--r--tools/testing/selftests/proc/proc-2-is-kthread.c53
-rw-r--r--tools/testing/selftests/proc/proc-empty-vm.c3
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c86
-rw-r--r--tools/testing/selftests/proc/proc-self-isnt-kthread.c37
-rw-r--r--tools/testing/selftests/resctrl/Makefile2
-rw-r--r--tools/testing/selftests/resctrl/cache.c10
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c5
-rw-r--r--tools/testing/selftests/resctrl/cmt_test.c22
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c26
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c26
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h49
-rw-r--r--tools/testing/selftests/resctrl/resctrl_val.c371
-rw-r--r--tools/testing/selftests/resctrl/resctrlfs.c67
-rw-r--r--tools/testing/selftests/ring-buffer/Makefile1
-rw-r--r--tools/testing/selftests/riscv/mm/Makefile2
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_prctl.c6
-rw-r--r--tools/testing/selftests/sched/cs_prctl_test.c10
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c131
-rw-r--r--tools/testing/selftests/sgx/Makefile2
-rw-r--r--tools/testing/selftests/sigaltstack/current_stack_pointer.h2
-rw-r--r--tools/testing/selftests/timens/exec.c6
-rw-r--r--tools/testing/selftests/timens/timer.c2
-rw-r--r--tools/testing/selftests/timens/timerfd.c2
-rw-r--r--tools/testing/selftests/timens/vfork_exec.c4
-rw-r--r--tools/testing/selftests/timers/rtcpie.c3
-rw-r--r--tools/testing/selftests/tmpfs/Makefile1
-rw-r--r--tools/testing/selftests/vDSO/Makefile29
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c16
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c18
-rw-r--r--tools/testing/selftests/wireguard/qemu/Makefile8
-rw-r--r--tools/testing/selftests/x86/Makefile31
-rw-r--r--tools/testing/selftests/x86/amx.c16
-rw-r--r--tools/testing/selftests/x86/clang_helpers_32.S11
-rw-r--r--tools/testing/selftests/x86/clang_helpers_64.S28
-rw-r--r--tools/testing/selftests/x86/fsgsbase.c6
-rw-r--r--tools/testing/selftests/x86/fsgsbase_restore.c11
-rw-r--r--tools/testing/selftests/x86/sigreturn.c2
-rw-r--r--tools/testing/selftests/x86/syscall_arg_fault.c1
-rw-r--r--tools/testing/selftests/x86/sysret_rip.c20
-rw-r--r--tools/testing/selftests/x86/test_FISTTP.c8
-rw-r--r--tools/testing/selftests/x86/test_shadow_stack.c145
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c15
-rw-r--r--tools/testing/selftests/x86/vdso_restorer.c2
-rw-r--r--tools/testing/vsock/Makefile13
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c15
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c19
-rw-r--r--tools/virtio/vringh_test.c9
981 files changed, 99684 insertions, 8138 deletions
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index ce2ee8f1e361..9306726337fe 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -19,7 +19,6 @@
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
-#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_MEMFD_SECRET
#include <asm-generic/unistd.h>
diff --git a/tools/arch/loongarch/include/uapi/asm/unistd.h b/tools/arch/loongarch/include/uapi/asm/unistd.h
index 0c743344e92d..8eeaac0087c3 100644
--- a/tools/arch/loongarch/include/uapi/asm/unistd.h
+++ b/tools/arch/loongarch/include/uapi/asm/unistd.h
@@ -4,6 +4,5 @@
*/
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#include <asm-generic/unistd.h>
diff --git a/tools/arch/x86/kcpuid/Makefile b/tools/arch/x86/kcpuid/Makefile
index 87b554fab14b..d0b4b0ed10ff 100644
--- a/tools/arch/x86/kcpuid/Makefile
+++ b/tools/arch/x86/kcpuid/Makefile
@@ -19,6 +19,6 @@ clean :
@rm -f kcpuid
install : kcpuid
- install -d $(DESTDIR)$(BINDIR)
+ install -d $(DESTDIR)$(BINDIR) $(DESTDIR)$(HWDATADIR)
install -m 755 -p kcpuid $(DESTDIR)$(BINDIR)/kcpuid
- install -m 444 -p cpuid.csv $(HWDATADIR)/cpuid.csv
+ install -m 444 -p cpuid.csv $(DESTDIR)$(HWDATADIR)/cpuid.csv
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
index eaba24320fb2..3f6bca03ad2e 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -28,7 +28,7 @@ BTF COMMANDS
| **bpftool** **btf help**
|
| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* }
-| *FORMAT* := { **raw** | **c** }
+| *FORMAT* := { **raw** | **c** [**unsorted**] }
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
@@ -63,7 +63,9 @@ bpftool btf dump *BTF_SRC*
pahole.
**format** option can be used to override default (raw) output format. Raw
- (**raw**) or C-syntax (**c**) output formats are supported.
+ (**raw**) or C-syntax (**c**) output formats are supported. With C-style
+ formatting, the output is sorted by default. Use the **unsorted** option
+ to avoid sorting the output.
bpftool btf help
Print short help message.
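For instance, following the syntax above, 'bpftool btf dump file /sys/kernel/btf/vmlinux format c unsorted' would emit the C-formatted dump while skipping the default sorting pass (illustrative invocation; the path is simply the usual location of the kernel's BTF).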
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index dfa4f1bebbb3..ba927379eb20 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -204,10 +204,11 @@ ifeq ($(feature-clang-bpf-co-re),1)
BUILD_BPF_SKELS := 1
-$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP)
ifeq ($(VMLINUX_H),)
+$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP)
$(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@
else
+$(OUTPUT)vmlinux.h: $(VMLINUX_H)
$(Q)cp "$(VMLINUX_H)" $@
endif
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 04afe2ac2228..be99d49b8714 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -930,6 +930,9 @@ _bpftool()
format)
COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) )
;;
+ c)
+ COMPREPLY=( $( compgen -W "unsorted" -- "$cur" ) )
+ ;;
*)
# emit extra options
case ${words[3]} in
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 91fcb75babe3..6789c7a4d5ca 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -20,6 +20,8 @@
#include "json_writer.h"
#include "main.h"
+#define KFUNC_DECL_TAG "bpf_kfunc"
+
static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_UNKN] = "UNKNOWN",
[BTF_KIND_INT] = "INT",
@@ -43,6 +45,13 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_ENUM64] = "ENUM64",
};
+struct sort_datum {
+ int index;
+ int type_rank;
+ const char *sort_name;
+ const char *own_name;
+};
+
static const char *btf_int_enc_str(__u8 encoding)
{
switch (encoding) {
@@ -454,15 +463,171 @@ static int dump_btf_raw(const struct btf *btf,
return 0;
}
+static int dump_btf_kfuncs(struct btf_dump *d, const struct btf *btf)
+{
+ LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts);
+ int cnt = btf__type_cnt(btf);
+ int i;
+
+ printf("\n/* BPF kfuncs */\n");
+ printf("#ifndef BPF_NO_KFUNC_PROTOTYPES\n");
+
+ for (i = 1; i < cnt; i++) {
+ const struct btf_type *t = btf__type_by_id(btf, i);
+ const char *name;
+ int err;
+
+ if (!btf_is_decl_tag(t))
+ continue;
+
+ if (btf_decl_tag(t)->component_idx != -1)
+ continue;
+
+ name = btf__name_by_offset(btf, t->name_off);
+ if (strncmp(name, KFUNC_DECL_TAG, sizeof(KFUNC_DECL_TAG)))
+ continue;
+
+ t = btf__type_by_id(btf, t->type);
+ if (!btf_is_func(t))
+ continue;
+
+ printf("extern ");
+
+ opts.field_name = btf__name_by_offset(btf, t->name_off);
+ err = btf_dump__emit_type_decl(d, t->type, &opts);
+ if (err)
+ return err;
+
+ printf(" __weak __ksym;\n");
+ }
+
+ printf("#endif\n\n");
+
+ return 0;
+}
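For a hypothetical kfunc such as bpf_foo_kfunc (name and signature invented for illustration), the loop above would print one prototype line of this shape inside the BPF_NO_KFUNC_PROTOTYPES guard:

    extern int bpf_foo_kfunc(struct task_struct *p) __weak __ksym;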
+
static void __printf(2, 0) btf_dump_printf(void *ctx,
const char *fmt, va_list args)
{
vfprintf(stdout, fmt, args);
}
+static int btf_type_rank(const struct btf *btf, __u32 index, bool has_name)
+{
+ const struct btf_type *t = btf__type_by_id(btf, index);
+ const int kind = btf_kind(t);
+ const int max_rank = 10;
+
+ if (t->name_off)
+ has_name = true;
+
+ switch (kind) {
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ return has_name ? 1 : 0;
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ return 2;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return has_name ? 3 : max_rank;
+ case BTF_KIND_FUNC_PROTO:
+ return has_name ? 4 : max_rank;
+ case BTF_KIND_ARRAY:
+ if (has_name)
+ return btf_type_rank(btf, btf_array(t)->type, has_name);
+ return max_rank;
+ case BTF_KIND_TYPE_TAG:
+ case BTF_KIND_CONST:
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_DECL_TAG:
+ if (has_name)
+ return btf_type_rank(btf, t->type, has_name);
+ return max_rank;
+ default:
+ return max_rank;
+ }
+}
+
+static const char *btf_type_sort_name(const struct btf *btf, __u32 index, bool from_ref)
+{
+ const struct btf_type *t = btf__type_by_id(btf, index);
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64: {
+ int name_off = t->name_off;
+
+ /* Use name of the first element for anonymous enums if allowed */
+ if (!from_ref && !t->name_off && btf_vlen(t))
+ name_off = btf_enum(t)->name_off;
+
+ return btf__name_by_offset(btf, name_off);
+ }
+ case BTF_KIND_ARRAY:
+ return btf_type_sort_name(btf, btf_array(t)->type, true);
+ case BTF_KIND_TYPE_TAG:
+ case BTF_KIND_CONST:
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_DECL_TAG:
+ return btf_type_sort_name(btf, t->type, true);
+ default:
+ return btf__name_by_offset(btf, t->name_off);
+ }
+ return NULL;
+}
+
+static int btf_type_compare(const void *left, const void *right)
+{
+ const struct sort_datum *d1 = (const struct sort_datum *)left;
+ const struct sort_datum *d2 = (const struct sort_datum *)right;
+ int r;
+
+ if (d1->type_rank != d2->type_rank)
+ return d1->type_rank < d2->type_rank ? -1 : 1;
+
+ r = strcmp(d1->sort_name, d2->sort_name);
+ if (r)
+ return r;
+
+ return strcmp(d1->own_name, d2->own_name);
+}
+
+static struct sort_datum *sort_btf_c(const struct btf *btf)
+{
+ struct sort_datum *datums;
+ int n;
+
+ n = btf__type_cnt(btf);
+ datums = malloc(sizeof(struct sort_datum) * n);
+ if (!datums)
+ return NULL;
+
+ for (int i = 0; i < n; ++i) {
+ struct sort_datum *d = datums + i;
+ const struct btf_type *t = btf__type_by_id(btf, i);
+
+ d->index = i;
+ d->type_rank = btf_type_rank(btf, i, false);
+ d->sort_name = btf_type_sort_name(btf, i, false);
+ d->own_name = btf__name_by_offset(btf, t->name_off);
+ }
+
+ qsort(datums, n, sizeof(struct sort_datum), btf_type_compare);
+
+ return datums;
+}
+
static int dump_btf_c(const struct btf *btf,
- __u32 *root_type_ids, int root_type_cnt)
+ __u32 *root_type_ids, int root_type_cnt, bool sort_dump)
{
+ struct sort_datum *datums = NULL;
struct btf_dump *d;
int err = 0, i;
@@ -476,6 +641,12 @@ static int dump_btf_c(const struct btf *btf,
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n");
printf("#endif\n\n");
+ printf("#ifndef __ksym\n");
+ printf("#define __ksym __attribute__((section(\".ksyms\")))\n");
+ printf("#endif\n\n");
+ printf("#ifndef __weak\n");
+ printf("#define __weak __attribute__((weak))\n");
+ printf("#endif\n\n");
if (root_type_cnt) {
for (i = 0; i < root_type_cnt; i++) {
@@ -486,11 +657,19 @@ static int dump_btf_c(const struct btf *btf,
} else {
int cnt = btf__type_cnt(btf);
+ if (sort_dump)
+ datums = sort_btf_c(btf);
for (i = 1; i < cnt; i++) {
- err = btf_dump__dump_type(d, i);
+ int idx = datums ? datums[i].index : i;
+
+ err = btf_dump__dump_type(d, idx);
if (err)
goto done;
}
+
+ err = dump_btf_kfuncs(d, btf);
+ if (err)
+ goto done;
}
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
@@ -500,6 +679,7 @@ static int dump_btf_c(const struct btf *btf,
printf("#endif /* __VMLINUX_H__ */\n");
done:
+ free(datums);
btf_dump__free(d);
return err;
}
@@ -549,10 +729,10 @@ static bool btf_is_kernel_module(__u32 btf_id)
static int do_dump(int argc, char **argv)
{
+ bool dump_c = false, sort_dump_c = true;
struct btf *btf = NULL, *base = NULL;
__u32 root_type_ids[2];
int root_type_cnt = 0;
- bool dump_c = false;
__u32 btf_id = -1;
const char *src;
int fd = -1;
@@ -663,6 +843,9 @@ static int do_dump(int argc, char **argv)
goto done;
}
NEXT_ARG();
+ } else if (is_prefix(*argv, "unsorted")) {
+ sort_dump_c = false;
+ NEXT_ARG();
} else {
p_err("unrecognized option: '%s'", *argv);
err = -EINVAL;
@@ -691,7 +874,7 @@ static int do_dump(int argc, char **argv)
err = -ENOTSUP;
goto done;
}
- err = dump_btf_c(btf, root_type_ids, root_type_cnt);
+ err = dump_btf_c(btf, root_type_ids, root_type_cnt, sort_dump_c);
} else {
err = dump_btf_raw(btf, root_type_ids, root_type_cnt);
}
@@ -1063,7 +1246,7 @@ static int do_help(int argc, char **argv)
" %1$s %2$s help\n"
"\n"
" BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
- " FORMAT := { raw | c }\n"
+ " FORMAT := { raw | c [unsorted] }\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS " |\n"
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index af6898c0f388..9af426d43299 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -19,6 +19,38 @@
#include "main.h"
+static const int cgroup_attach_types[] = {
+ BPF_CGROUP_INET_INGRESS,
+ BPF_CGROUP_INET_EGRESS,
+ BPF_CGROUP_INET_SOCK_CREATE,
+ BPF_CGROUP_INET_SOCK_RELEASE,
+ BPF_CGROUP_INET4_BIND,
+ BPF_CGROUP_INET6_BIND,
+ BPF_CGROUP_INET4_POST_BIND,
+ BPF_CGROUP_INET6_POST_BIND,
+ BPF_CGROUP_INET4_CONNECT,
+ BPF_CGROUP_INET6_CONNECT,
+ BPF_CGROUP_UNIX_CONNECT,
+ BPF_CGROUP_INET4_GETPEERNAME,
+ BPF_CGROUP_INET6_GETPEERNAME,
+ BPF_CGROUP_UNIX_GETPEERNAME,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ BPF_CGROUP_UNIX_GETSOCKNAME,
+ BPF_CGROUP_UDP4_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_CGROUP_UNIX_SENDMSG,
+ BPF_CGROUP_UDP4_RECVMSG,
+ BPF_CGROUP_UDP6_RECVMSG,
+ BPF_CGROUP_UNIX_RECVMSG,
+ BPF_CGROUP_SOCK_OPS,
+ BPF_CGROUP_DEVICE,
+ BPF_CGROUP_SYSCTL,
+ BPF_CGROUP_GETSOCKOPT,
+ BPF_CGROUP_SETSOCKOPT,
+ BPF_LSM_CGROUP
+};
+
#define HELP_SPEC_ATTACH_FLAGS \
"ATTACH_FLAGS := { multi | override }"
@@ -183,13 +215,13 @@ static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
static int cgroup_has_attached_progs(int cgroup_fd)
{
- enum bpf_attach_type type;
+ unsigned int i = 0;
bool no_prog = true;
- for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
- int count = count_attached_bpf_progs(cgroup_fd, type);
+ for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) {
+ int count = count_attached_bpf_progs(cgroup_fd, cgroup_attach_types[i]);
- if (count < 0 && errno != EINVAL)
+ if (count < 0)
return -1;
if (count > 0) {
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 958e92acca8e..9b75639434b8 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -410,7 +410,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
{
const char *prog_name = prog_info->name;
const struct btf_type *func_type;
- const struct bpf_func_info finfo = {};
+ struct bpf_func_info finfo = {};
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
struct btf *prog_btf = NULL;
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index b3979ddc0189..5a4d3240689e 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -848,28 +848,45 @@ out:
}
static void
-codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
+codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool populate_links)
{
struct bpf_map *map;
char ident[256];
- size_t i;
+ size_t i, map_sz;
if (!map_cnt)
return;
+ /* for backward compatibility with old libbpf versions that don't
+ * handle new BPF skeleton with new struct bpf_map_skeleton definition
+ * that includes link field, avoid specifying new increased size,
+ * unless we absolutely have to (i.e., if there are struct_ops maps
+ * present)
+ */
+ map_sz = offsetof(struct bpf_map_skeleton, link);
+ if (populate_links) {
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
+ map_sz = sizeof(struct bpf_map_skeleton);
+ break;
+ }
+ }
+ }
+
codegen("\
\n\
- \n\
+ \n\
/* maps */ \n\
s->map_cnt = %zu; \n\
- s->map_skel_sz = sizeof(*s->maps); \n\
- s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
+ s->map_skel_sz = %zu; \n\
+ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\
+ sizeof(*s->maps) > %zu ? sizeof(*s->maps) : %zu);\n\
if (!s->maps) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
",
- map_cnt
+ map_cnt, map_sz, map_sz, map_sz
);
i = 0;
bpf_object__for_each_map(map, obj) {
@@ -878,15 +895,22 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
codegen("\
\n\
- \n\
- s->maps[%zu].name = \"%s\"; \n\
- s->maps[%zu].map = &obj->maps.%s; \n\
+ \n\
+ map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
+ map->name = \"%s\"; \n\
+ map->map = &obj->maps.%s; \n\
",
- i, bpf_map__name(map), i, ident);
+ i, bpf_map__name(map), ident);
/* memory-mapped internal maps */
if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
- printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
- i, ident);
+ printf("\tmap->mmaped = (void **)&obj->%s;\n", ident);
+ }
+
+ if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
+ codegen("\
+ \n\
+ map->link = &obj->links.%s; \n\
+ ", ident);
}
i++;
}
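As a standalone restatement of the addressing scheme used above (a sketch, not generated output): entries are stepped by the recorded map_skel_sz rather than by sizeof(*s->maps), so the stride chosen at codegen time is what gets honoured when walking the array.

    static struct bpf_map_skeleton *
    skel_map_at(const struct bpf_object_skeleton *s, size_t i)
    {
            /* stride is the size recorded by the generator, not the current struct size */
            return (struct bpf_map_skeleton *)((char *)s->maps + i * s->map_skel_sz);
    }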
@@ -1141,7 +1165,7 @@ static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
- size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
+ size_t map_cnt = 0, prog_cnt = 0, attach_map_cnt = 0, file_sz, mmap_sz;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
@@ -1225,6 +1249,10 @@ static int do_skeleton(int argc, char **argv)
bpf_map__name(map));
continue;
}
+
+ if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS)
+ attach_map_cnt++;
+
map_cnt++;
}
bpf_object__for_each_program(prog, obj) {
@@ -1260,6 +1288,8 @@ static int do_skeleton(int argc, char **argv)
#include <stdlib.h> \n\
#include <bpf/libbpf.h> \n\
\n\
+ #define BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH 1 \n\
+ \n\
struct %1$s { \n\
struct bpf_object_skeleton *skeleton; \n\
struct bpf_object *obj; \n\
@@ -1297,6 +1327,9 @@ static int do_skeleton(int argc, char **argv)
bpf_program__name(prog));
}
printf("\t} progs;\n");
+ }
+
+ if (prog_cnt + attach_map_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
if (use_loader)
@@ -1306,6 +1339,19 @@ static int do_skeleton(int argc, char **argv)
printf("\t\tstruct bpf_link *%s;\n",
bpf_program__name(prog));
}
+
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+
+ if (use_loader)
+ printf("t\tint %s_fd;\n", ident);
+ else
+ printf("\t\tstruct bpf_link *%s;\n", ident);
+ }
+
printf("\t} links;\n");
}
@@ -1433,6 +1479,7 @@ static int do_skeleton(int argc, char **argv)
%1$s__create_skeleton(struct %1$s *obj) \n\
{ \n\
struct bpf_object_skeleton *s; \n\
+ struct bpf_map_skeleton *map __attribute__((unused));\n\
int err; \n\
\n\
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
@@ -1448,7 +1495,7 @@ static int do_skeleton(int argc, char **argv)
obj_name
);
- codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
+ codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/, true /*links*/);
codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
codegen("\
@@ -1723,6 +1770,7 @@ static int do_subskeleton(int argc, char **argv)
{ \n\
struct %1$s *obj; \n\
struct bpf_object_subskeleton *s; \n\
+ struct bpf_map_skeleton *map __attribute__((unused));\n\
int err; \n\
\n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
@@ -1786,7 +1834,7 @@ static int do_subskeleton(int argc, char **argv)
}
}
- codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
+ codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/, false /*links*/);
codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
codegen("\
@@ -2379,15 +2427,6 @@ out:
return err;
}
-static int btfgen_remap_id(__u32 *type_id, void *ctx)
-{
- unsigned int *ids = ctx;
-
- *type_id = ids[*type_id];
-
- return 0;
-}
-
/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
@@ -2467,10 +2506,15 @@ static struct btf *btfgen_get_btf(struct btfgen_info *info)
/* second pass: fix up type ids */
for (i = 1; i < btf__type_cnt(btf_new); i++) {
struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
+ struct btf_field_iter it;
+ __u32 *type_id;
- err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
+ err = btf_field_iter_init(&it, btf_type, BTF_FIELD_ITER_IDS);
if (err)
goto err_out;
+
+ while ((type_id = btf_field_iter_next(&it)))
+ *type_id = ids[*type_id];
}
free(ids);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 1a501cf09e78..40ea743d139f 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1813,6 +1813,10 @@ offload_dev:
}
if (pinmaps) {
+ err = create_and_mount_bpffs_dir(pinmaps);
+ if (err)
+ goto err_unpin;
+
err = bpf_object__pin_maps(obj, pinmaps);
if (err) {
p_err("failed to pin all maps");
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
index 7bdbcac3cf62..948dde25034e 100644
--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
@@ -29,6 +29,7 @@ enum bpf_link_type___local {
};
extern const void bpf_link_fops __ksym;
+extern const void bpf_link_fops_poll __ksym __weak;
extern const void bpf_map_fops __ksym;
extern const void bpf_prog_fops __ksym;
extern const void btf_fops __ksym;
@@ -84,7 +85,11 @@ int iter(struct bpf_iter__task_file *ctx)
fops = &btf_fops;
break;
case BPF_OBJ_LINK:
- fops = &bpf_link_fops;
+ if (&bpf_link_fops_poll &&
+ file->f_op == &bpf_link_fops_poll)
+ fops = &bpf_link_fops_poll;
+ else
+ fops = &bpf_link_fops;
break;
default:
return 0;
diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
index 2f80edc682f1..f48c783cb9f7 100644
--- a/tools/bpf/bpftool/skeleton/profiler.bpf.c
+++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -40,17 +40,17 @@ struct {
const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
-#define MAX_NUM_MATRICS 4
+#define MAX_NUM_METRICS 4
SEC("fentry/XXX")
int BPF_PROG(fentry_XXX)
{
- struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
+ struct bpf_perf_event_value___local *ptrs[MAX_NUM_METRICS];
u32 key = bpf_get_smp_processor_id();
u32 i;
/* look up before reading, to reduce error */
- for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
u32 flag = i;
ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
@@ -58,7 +58,7 @@ int BPF_PROG(fentry_XXX)
return 0;
}
- for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
struct bpf_perf_event_value___local reading;
int err;
@@ -99,14 +99,14 @@ fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
- struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
+ struct bpf_perf_event_value___local readings[MAX_NUM_METRICS];
u32 cpu = bpf_get_smp_processor_id();
u32 i, zero = 0;
int err;
u64 *count;
/* read all events before updating the maps, to reduce error */
- for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
(void *)(readings + i),
sizeof(*readings));
@@ -116,7 +116,7 @@ int BPF_PROG(fexit_XXX)
count = bpf_map_lookup_elem(&counts, &zero);
if (count) {
*count += 1;
- for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++)
fexit_update_maps(i, &readings[i]);
}
return 0;
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index af393c7dee1f..936ef95c3d32 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -409,6 +409,14 @@ static int elf_collect(struct object *obj)
obj->efile.idlist = data;
obj->efile.idlist_shndx = idx;
obj->efile.idlist_addr = sh.sh_addr;
+ } else if (!strcmp(name, BTF_BASE_ELF_SEC)) {
+ /* If a .BTF.base section is found, do not resolve
+ * BTF ids relative to vmlinux; resolve relative
+ * to the .BTF.base section instead. btf__parse_split()
+ * will take care of this because the base BTF
+ * passed to it will be NULL.
+ */
+ obj->base_btf_path = NULL;
}
if (compressed_section_fix(elf, scn, &sh))
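Roughly, the effect described in that comment amounts to the following call shape in the BTF loading path (illustrative only; obj->path is the module object being processed):

    /* base_btf_path was cleared above, so no vmlinux base BTF is loaded */
    struct btf *btf = btf__parse_split(obj->path, /* base_btf */ NULL);

With a NULL base, the split BTF is resolved against the .BTF.base section embedded in the object itself, as the comment states.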
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index ed54cef450f5..489cbed7e82a 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -213,7 +213,7 @@ $(OUTPUT)test-libtraceevent.bin:
$(BUILD) -ltraceevent
$(OUTPUT)test-libtracefs.bin:
- $(BUILD) $(shell $(PKG_CONFIG) --cflags libtraceevent 2>/dev/null) -ltracefs
+ $(BUILD) $(shell $(PKG_CONFIG) --cflags libtracefs 2>/dev/null) -ltracefs
$(OUTPUT)test-libcrypto.bin:
$(BUILD) -lcrypto
diff --git a/tools/build/feature/test-libtracefs.c b/tools/build/feature/test-libtracefs.c
index 8eff16c0c10b..29a757a7d848 100644
--- a/tools/build/feature/test-libtracefs.c
+++ b/tools/build/feature/test-libtracefs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <tracefs/tracefs.h>
+#include <tracefs.h>
int main(void)
{
diff --git a/tools/gpio/gpio-sloppy-logic-analyzer.sh b/tools/gpio/gpio-sloppy-logic-analyzer.sh
new file mode 100755
index 000000000000..ed21a110df5e
--- /dev/null
+++ b/tools/gpio/gpio-sloppy-logic-analyzer.sh
@@ -0,0 +1,246 @@
+#!/bin/sh -eu
+# SPDX-License-Identifier: GPL-2.0
+#
+# Helper script for the Linux Kernel GPIO sloppy logic analyzer
+#
+# Copyright (C) Wolfram Sang <wsa@sang-engineering.com>
+# Copyright (C) Renesas Electronics Corporation
+
+samplefreq=1000000
+numsamples=250000
+cpusetdefaultdir='/sys/fs/cgroup'
+cpusetprefix='cpuset.'
+debugdir='/sys/kernel/debug'
+ladirname='gpio-sloppy-logic-analyzer'
+outputdir="$PWD"
+neededcmds='taskset zip'
+max_chans=8
+duration=
+initcpu=
+listinstances=0
+lainstance=
+lasysfsdir=
+triggerdat=
+trigger_bindat=
+progname="${0##*/}"
+print_help()
+{
+ cat << EOF
+$progname - helper script for the Linux Kernel Sloppy GPIO Logic Analyzer
+Available options:
+ -c|--cpu <n>: which CPU to isolate for sampling. Only needed once. Default <1>.
+ Remember that a more powerful CPU gives you higher sampling speeds.
+ Also CPU0 is not recommended as it usually does extra bookkeeping.
+ -d|--duration-us <SI-n>: number of microseconds to sample. Overrides -n, no default value.
+ -h|--help: print this help
+ -i|--instance <str>: name of the logic analyzer in case you have multiple instances. Defaults
+ to the first instance found
+ -k|--kernel-debug-dir <str>: path to the kernel debugfs mountpoint. Default: <$debugdir>
+ -l|--list-instances: list all available instances
+ -n|--num_samples <SI-n>: number of samples to acquire. Default <$numsamples>
+ -o|--output-dir <str>: directory to put the result files. Default: current dir
+ -s|--sample_freq <SI-n>: desired sampling frequency. Might be capped if too large.
+ Default: <1000000>
+ -t|--trigger <str>: pattern to use as trigger. <str> consists of two-char pairs. First
+ char is channel number starting at "1". Second char is trigger level:
+ "L" - low; "H" - high; "R" - rising; "F" - falling
+ These pairs can be combined with "+", so "1H+2F" triggers when probe 1
+ is high while probe 2 has a falling edge. You can have multiple triggers
+ combined with ",". So, "1H+2F,1H+2R" is like the example before but it
+ waits for a rising edge on probe 2 while probe 1 is still high after the
+ first trigger has been met.
+ Trigger data will only be used for the next capture and then be erased.
+
+<SI-n> is an integer value where SI units "T", "G", "M", "K" are recognized, e.g. '1M500K' is 1500000.
+
+Examples:
+Samples $numsamples values at 1MHz with an already prepared CPU or automatically prepares CPU1 if needed,
+using the first logic analyzer instance found:
+ '$progname'
+Samples 50us at 2MHz waiting for a falling edge on channel 2. CPU and instance as above:
+ '$progname -d 50 -s 2M -t "2F"'
+
+Note that the process exits after checking all parameters, but a sub-process keeps running in
+the background. The result is only available once the sub-process finishes.
+
+Result is a .sr file to be consumed with PulseView from the free Sigrok project. It is
+a zip file which also contains the binary sample data which may be consumed by others.
+The filename is the logic analyzer instance name plus a since-epoch timestamp.
+EOF
+}
+
+fail()
+{
+ echo "$1"
+ exit 1
+}
+
+parse_si()
+{
+ conv_si="$(printf $1 | sed 's/[tT]+\?/*1000G+/g; s/[gG]+\?/*1000M+/g; s/[mM]+\?/*1000K+/g; s/[kK]+\?/*1000+/g; s/+$//')"
+ si_val="$((conv_si))"
+}
+set_newmask()
+{
+ for f in $(find "$1" -iname "$2"); do echo "$newmask" > "$f" 2>/dev/null || true; done
+}
+
+init_cpu()
+{
+ isol_cpu="$1"
+
+ [ -d "$lacpusetdir" ] || mkdir "$lacpusetdir"
+
+ cur_cpu=$(cat "${lacpusetfile}cpus")
+ [ "$cur_cpu" = "$isol_cpu" ] && return
+ [ -z "$cur_cpu" ] || fail "CPU$isol_cpu requested but CPU$cur_cpu already isolated"
+
+ echo "$isol_cpu" > "${lacpusetfile}cpus" || fail "Could not isolate CPU$isol_cpu. Does it exist?"
+ echo 1 > "${lacpusetfile}cpu_exclusive"
+ echo 0 > "${lacpusetfile}mems"
+
+ oldmask=$(cat /proc/irq/default_smp_affinity)
+ newmask=$(printf "%x" $((0x$oldmask & ~(1 << isol_cpu))))
+
+ set_newmask '/proc/irq' '*smp_affinity'
+ set_newmask '/sys/devices/virtual/workqueue/' 'cpumask'
+
+ # Move tasks away from isolated CPU
+ for p in $(ps -o pid | tail -n +2); do
+ mask=$(taskset -p "$p") || continue
+ # Ignore tasks with a custom mask, i.e. not equal $oldmask
+ [ "${mask##*: }" = "$oldmask" ] || continue
+ taskset -p "$newmask" "$p" || continue
+ done 2>/dev/null >/dev/null
+
+ # Big hammer! Working with 'rcu_momentary_dyntick_idle()' for a more fine-grained solution
+ # still printed warnings. Same for re-enabling the stall detector after sampling.
+ echo 1 > /sys/module/rcupdate/parameters/rcu_cpu_stall_suppress
+
+ cpufreqgov="/sys/devices/system/cpu/cpu$isol_cpu/cpufreq/scaling_governor"
+ [ -w "$cpufreqgov" ] && echo 'performance' > "$cpufreqgov" || true
+}
+
+parse_triggerdat()
+{
+ oldifs="$IFS"
+ IFS=','; for trig in $1; do
+ mask=0; val1=0; val2=0
+ IFS='+'; for elem in $trig; do
+ chan=${elem%[lhfrLHFR]}
+ mode=${elem#$chan}
+ # Check if we could parse something and the channel number fits
+ [ "$chan" != "$elem" ] && [ "$chan" -le $max_chans ] || fail "Trigger syntax error: $elem"
+ bit=$((1 << (chan - 1)))
+ mask=$((mask | bit))
+ case $mode in
+ [hH]) val1=$((val1 | bit)); val2=$((val2 | bit));;
+ [fF]) val1=$((val1 | bit));;
+ [rR]) val2=$((val2 | bit));;
+ esac
+ done
+ trigger_bindat="$trigger_bindat$(printf '\\%o\\%o' $mask $val1)"
+ [ $val1 -ne $val2 ] && trigger_bindat="$trigger_bindat$(printf '\\%o\\%o' $mask $val2)"
+ done
+ IFS="$oldifs"
+}
+
+do_capture()
+{
+ taskset "$1" echo 1 > "$lasysfsdir"/capture || fail "Capture error! Check kernel log"
+
+ srtmp=$(mktemp -d)
+ echo 1 > "$srtmp"/version
+ cp "$lasysfsdir"/sample_data "$srtmp"/logic-1-1
+ cat > "$srtmp"/metadata << EOF
+[global]
+sigrok version=0.2.0
+
+[device 1]
+capturefile=logic-1
+total probes=$(wc -l < "$lasysfsdir"/meta_data)
+samplerate=${samplefreq}Hz
+unitsize=1
+EOF
+ cat "$lasysfsdir"/meta_data >> "$srtmp"/metadata
+
+ zipname="$outputdir/${lasysfsdir##*/}-$(date +%s).sr"
+ zip -jq "$zipname" "$srtmp"/*
+ rm -rf "$srtmp"
+ delay_ack=$(cat "$lasysfsdir"/delay_ns_acquisition)
+ [ "$delay_ack" -eq 0 ] && delay_ack=1
+ echo "Logic analyzer done. Saved '$zipname'"
+ echo "Max sample frequency this time: $((1000000000 / delay_ack))Hz."
+}
+
+rep=$(getopt -a -l cpu:,duration-us:,help,instance:,list-instances,kernel-debug-dir:,num_samples:,output-dir:,sample_freq:,trigger: -o c:d:hi:k:ln:o:s:t: -- "$@") || exit 1
+eval set -- "$rep"
+while true; do
+ case "$1" in
+ -c|--cpu) initcpu="$2"; shift;;
+ -d|--duration-us) parse_si $2; duration=$si_val; shift;;
+ -h|--help) print_help; exit 0;;
+ -i|--instance) lainstance="$2"; shift;;
+ -k|--kernel-debug-dir) debugdir="$2"; shift;;
+ -l|--list-instances) listinstances=1;;
+ -n|--num_samples) parse_si $2; numsamples=$si_val; shift;;
+ -o|--output-dir) outputdir="$2"; shift;;
+ -s|--sample_freq) parse_si $2; samplefreq=$si_val; shift;;
+ -t|--trigger) triggerdat="$2"; shift;;
+ --) break;;
+ *) fail "error parsing command line: $*";;
+ esac
+ shift
+done
+
+for f in $neededcmds; do
+ command -v "$f" >/dev/null || fail "Command '$f' not found"
+done
+
+# print cpuset mountpoint if any, errorcode > 0 if noprefix option was found
+cpusetdir=$(awk '$3 == "cgroup" && $4 ~ /cpuset/ { print $2; exit (match($4, /noprefix/) > 0) }' /proc/self/mounts) || cpusetprefix=''
+if [ -z "$cpusetdir" ]; then
+ cpusetdir="$cpusetdefaultdir"
+ [ -d $cpusetdir ] || mkdir $cpusetdir
+ mount -t cgroup -o cpuset none $cpusetdir || fail "Couldn't mount cpusets. Not in kernel or already in use?"
+fi
+
+lacpusetdir="$cpusetdir/$ladirname"
+lacpusetfile="$lacpusetdir/$cpusetprefix"
+sysfsdir="$debugdir/$ladirname"
+
+[ "$samplefreq" -ne 0 ] || fail "Invalid sample frequency"
+
+[ -d "$sysfsdir" ] || fail "Could not find logic analyzer root dir '$sysfsdir'. Module loaded?"
+[ -x "$sysfsdir" ] || fail "Could not access logic analyzer root dir '$sysfsdir'. Need root?"
+
+[ $listinstances -gt 0 ] && find "$sysfsdir" -mindepth 1 -type d | sed 's|.*/||' && exit 0
+
+if [ -n "$lainstance" ]; then
+ lasysfsdir="$sysfsdir/$lainstance"
+else
+ lasysfsdir=$(find "$sysfsdir" -mindepth 1 -type d -print -quit)
+fi
+[ -d "$lasysfsdir" ] || fail "Logic analyzer directory '$lasysfsdir' not found!"
+[ -d "$outputdir" ] || fail "Output directory '$outputdir' not found!"
+
+[ -n "$initcpu" ] && init_cpu "$initcpu"
+[ -d "$lacpusetdir" ] || { echo "Auto-Isolating CPU1"; init_cpu 1; }
+
+ndelay=$((1000000000 / samplefreq))
+echo "$ndelay" > "$lasysfsdir"/delay_ns
+
+[ -n "$duration" ] && numsamples=$((samplefreq * duration / 1000000))
+echo $numsamples > "$lasysfsdir"/buf_size
+
+if [ -n "$triggerdat" ]; then
+ parse_triggerdat "$triggerdat"
+ printf "$trigger_bindat" > "$lasysfsdir"/trigger 2>/dev/null || fail "Trigger data '$triggerdat' rejected"
+fi
+
+workcpu=$(cat "${lacpusetfile}effective_cpus")
+[ -n "$workcpu" ] || fail "No isolated CPU found"
+cpumask=$(printf '%x' $((1 << workcpu)))
+instance=${lasysfsdir##*/}
+echo "Setting up '$instance': $numsamples samples at ${samplefreq}Hz with ${triggerdat:-no} trigger using CPU$workcpu"
+do_capture "$cpumask" &
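Worked example of the SI expansion in parse_si(): '1M500K' is rewritten by the sed expression to '1*1000*1000+500*1000', which the shell arithmetic then evaluates to 1500000, matching the '1M500K' example in the help text.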
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 8a63a9913495..6f7f22ac9da5 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -62,6 +62,10 @@
#define __nocf_check __attribute__((nocf_check))
#endif
+#ifndef __naked
+#define __naked __attribute__((__naked__))
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index dc0fc7125bc3..cad4f2927983 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -12,6 +12,7 @@
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
#define __va(x) ((void *)((unsigned long)(x)))
#define __pa(x) ((unsigned long)(x))
diff --git a/tools/include/linux/numa.h b/tools/include/linux/numa.h
index 110b0e5d0fb0..c8b9369335e0 100644
--- a/tools/include/linux/numa.h
+++ b/tools/include/linux/numa.h
@@ -13,4 +13,9 @@
#define NUMA_NO_NODE (-1)
+static inline bool numa_valid_node(int nid)
+{
+ return nid >= 0 && nid < MAX_NUMNODES;
+}
+
#endif /* _LINUX_NUMA_H */
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
index 2e6338ac5eed..e530e54046c9 100644
--- a/tools/include/linux/poison.h
+++ b/tools/include/linux/poison.h
@@ -47,11 +47,8 @@
* Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
-#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
-#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
-
-#define SLUB_RED_INACTIVE 0xbb
-#define SLUB_RED_ACTIVE 0xcc
+#define SLUB_RED_INACTIVE 0xbb /* when obj is inactive */
+#define SLUB_RED_ACTIVE 0xcc /* when obj is active */
/* ...and for poisoning */
#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
diff --git a/tools/include/nolibc/stdint.h b/tools/include/nolibc/stdint.h
index 6665e272e213..cd79ddd6170e 100644
--- a/tools/include/nolibc/stdint.h
+++ b/tools/include/nolibc/stdint.h
@@ -96,6 +96,10 @@ typedef uint64_t uintmax_t;
#define UINT_FAST32_MAX SIZE_MAX
#define UINT_FAST64_MAX UINT64_MAX
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
#ifndef INT_MIN
#define INT_MIN (-__INT_MAX__ - 1)
#endif
@@ -110,4 +114,19 @@ typedef uint64_t uintmax_t;
#define LONG_MAX __LONG_MAX__
#endif
+#ifndef ULONG_MAX
+#define ULONG_MAX ((unsigned long)(__LONG_MAX__) * 2 + 1)
+#endif
+
+#ifndef LLONG_MIN
+#define LLONG_MIN (-__LONG_LONG_MAX__ - 1)
+#endif
+#ifndef LLONG_MAX
+#define LLONG_MAX __LONG_LONG_MAX__
+#endif
+
+#ifndef ULLONG_MAX
+#define ULLONG_MAX ((unsigned long long)(__LONG_LONG_MAX__) * 2 + 1)
+#endif
+
#endif /* _NOLIBC_STDINT_H */
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
index 16cd4d807251..c968dbbc4ef8 100644
--- a/tools/include/nolibc/stdio.h
+++ b/tools/include/nolibc/stdio.h
@@ -376,6 +376,16 @@ int setvbuf(FILE *stream __attribute__((unused)),
return 0;
}
+static __attribute__((unused))
+const char *strerror(int errno)
+{
+ static char buf[18] = "errno=";
+
+ i64toa_r(errno, &buf[6]);
+
+ return buf;
+}
+
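Usage sketch (assuming a nolibc build): this strerror() only formats the number, so strerror(2) returns the string "errno=2" rather than a descriptive message such as "No such file or directory".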
/* make sure to include all global symbols */
#include "nolibc.h"
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index 5be9d3c7435a..75aa273c23a6 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -438,6 +438,115 @@ char *u64toa(uint64_t in)
return itoa_buffer;
}
+static __attribute__((unused))
+uintmax_t __strtox(const char *nptr, char **endptr, int base, intmax_t lower_limit, uintmax_t upper_limit)
+{
+ const char signed_ = lower_limit != 0;
+ unsigned char neg = 0, overflow = 0;
+ uintmax_t val = 0, limit, old_val;
+ char c;
+
+ if (base < 0 || base > 36) {
+ SET_ERRNO(EINVAL);
+ goto out;
+ }
+
+ while (isspace(*nptr))
+ nptr++;
+
+ if (*nptr == '+') {
+ nptr++;
+ } else if (*nptr == '-') {
+ neg = 1;
+ nptr++;
+ }
+
+ if (signed_ && neg)
+ limit = -(uintmax_t)lower_limit;
+ else
+ limit = upper_limit;
+
+ if ((base == 0 || base == 16) &&
+ (strncmp(nptr, "0x", 2) == 0 || strncmp(nptr, "0X", 2) == 0)) {
+ base = 16;
+ nptr += 2;
+ } else if (base == 0 && strncmp(nptr, "0", 1) == 0) {
+ base = 8;
+ nptr += 1;
+ } else if (base == 0) {
+ base = 10;
+ }
+
+ while (*nptr) {
+ c = *nptr;
+
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'a' && c <= 'z')
+ c = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'Z')
+ c = c - 'A' + 10;
+ else
+ goto out;
+
+ if (c >= base)
+ goto out;
+
+ nptr++;
+ old_val = val;
+ val *= base;
+ val += c;
+
+ if (val > limit || val < old_val)
+ overflow = 1;
+ }
+
+out:
+ if (overflow) {
+ SET_ERRNO(ERANGE);
+ val = limit;
+ }
+ if (endptr)
+ *endptr = (char *)nptr;
+ return neg ? -val : val;
+}
+
+static __attribute__((unused))
+long strtol(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, LONG_MIN, LONG_MAX);
+}
+
+static __attribute__((unused))
+unsigned long strtoul(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, 0, ULONG_MAX);
+}
+
+static __attribute__((unused))
+long long strtoll(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, LLONG_MIN, LLONG_MAX);
+}
+
+static __attribute__((unused))
+unsigned long long strtoull(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, 0, ULLONG_MAX);
+}
+
+static __attribute__((unused))
+intmax_t strtoimax(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, INTMAX_MIN, INTMAX_MAX);
+}
+
+static __attribute__((unused))
+uintmax_t strtoumax(const char *nptr, char **endptr, int base)
+{
+ return __strtox(nptr, endptr, base, 0, UINTMAX_MAX);
+}
+
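A short usage sketch of the helpers above (values are illustrative): base 0 triggers the 0x/0 prefix detection in __strtox(), and endptr is left at the first unparsed character.

    char *end;
    long v = strtol("0x1f rest", &end, 0);  /* v == 31, end points at " rest" */

    unsigned long long big = strtoull("999999999999999999999", NULL, 10);
    /* overflow: big == ULLONG_MAX and errno is set to ERANGE */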
/* make sure to include all global symbols */
#include "nolibc.h"
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index d983c48a3b6a..a00d53d02723 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -776,12 +776,8 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
-
-#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
-#endif
-
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 90706a47f6ff..35bcf52dbc65 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1425,6 +1425,8 @@ enum {
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
/* If set, XDP frames will be transmitted after processing */
#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
+/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */
+#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2)
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
@@ -6207,12 +6209,17 @@ union { \
__u64 :64; \
} __attribute__((aligned(8)))
+/* The enum used in skb->tstamp_type. It specifies the clock type
+ * of the time stored in the skb->tstamp.
+ */
enum {
- BPF_SKB_TSTAMP_UNSPEC,
- BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
- /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
- * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
- * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
+ BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */
+ BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */
+ BPF_SKB_CLOCK_REALTIME = 0,
+ BPF_SKB_CLOCK_MONOTONIC = 1,
+ BPF_SKB_CLOCK_TAI = 2,
+ /* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle,
+ * the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid.
*/
};
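A sketch of how a tc BPF program might consume the new constants (assumes the usual bpf_helpers.h/vmlinux.h setup; not code from this patch):

    SEC("tc")
    int tstamp_probe(struct __sk_buff *skb)
    {
            /* tstamp_type reports the clock base of skb->tstamp */
            if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC)
                    bpf_printk("mono delivery time: %llu", skb->tstamp);
            return 0;
    }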
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
new file mode 100644
index 000000000000..8a27bc5c7a7f
--- /dev/null
+++ b/tools/include/uapi/linux/fs.h
@@ -0,0 +1,552 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_FS_H
+#define _UAPI_LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table structures
+ * and constants and structures used by various generic file system
+ * ioctl's. Please do not make any changes in this file before
+ * sending patches for review to linux-fsdevel@vger.kernel.org and
+ * linux-api@vger.kernel.org.
+ */
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#ifndef __KERNEL__
+#include <linux/fscrypt.h>
+#endif
+
+/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
+#if !defined(__KERNEL__)
+#include <linux/mount.h>
+#endif
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
+ * the file limit at runtime and only root can increase the per-process
+ * nr_file rlimit, so it's safe to set up a ridiculously high absolute
+ * upper limit on files-per-process.
+ *
+ * Some programs (notably those using select()) may have to be
+ * recompiled to take full advantage of the new limits..
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define INR_OPEN_CUR 1024 /* Initial setting for nfile rlimits */
+#define INR_OPEN_MAX 4096 /* Hard limit for nfile rlimits */
+
+#define BLOCK_SIZE_BITS 10
+#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
+
+#define SEEK_SET 0 /* seek relative to beginning of file */
+#define SEEK_CUR 1 /* seek relative to current file position */
+#define SEEK_END 2 /* seek relative to end of file */
+#define SEEK_DATA 3 /* seek to the next data */
+#define SEEK_HOLE 4 /* seek to the next hole */
+#define SEEK_MAX SEEK_HOLE
+
+#define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */
+#define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */
+#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */
+
+struct file_clone_range {
+ __s64 src_fd;
+ __u64 src_offset;
+ __u64 src_length;
+ __u64 dest_offset;
+};
+
+struct fstrim_range {
+ __u64 start;
+ __u64 len;
+ __u64 minlen;
+};
+
+/*
+ * We include a length field because some filesystems (vfat) have an identifier
+ * that we do want to expose as a UUID, but doesn't have the standard length.
+ *
+ * We use a fixed size buffer because this interface will, by fiat, never
+ * support "UUIDs" longer than 16 bytes; we don't want to force all downstream
+ * users to have to deal with that.
+ */
+struct fsuuid2 {
+ __u8 len;
+ __u8 uuid[16];
+};
+
+struct fs_sysfs_path {
+ __u8 len;
+ __u8 name[128];
+};
+
+/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */
+#define FILE_DEDUPE_RANGE_SAME 0
+#define FILE_DEDUPE_RANGE_DIFFERS 1
+
+/* from struct btrfs_ioctl_file_extent_same_info */
+struct file_dedupe_range_info {
+ __s64 dest_fd; /* in - destination file */
+ __u64 dest_offset; /* in - start of extent in destination */
+ __u64 bytes_deduped; /* out - total # of bytes we were able
+ * to dedupe from this file. */
+ /* status of this dedupe operation:
+ * < 0 for error
+ * == FILE_DEDUPE_RANGE_SAME if dedupe succeeds
+ * == FILE_DEDUPE_RANGE_DIFFERS if data differs
+ */
+ __s32 status; /* out - see above description */
+ __u32 reserved; /* must be zero */
+};
+
+/* from struct btrfs_ioctl_file_extent_same_args */
+struct file_dedupe_range {
+ __u64 src_offset; /* in - start of extent in source */
+ __u64 src_length; /* in - length of extent */
+ __u16 dest_count; /* in - total elements in info array */
+ __u16 reserved1; /* must be zero */
+ __u32 reserved2; /* must be zero */
+ struct file_dedupe_range_info info[];
+};
+
+/* And dynamically-tunable limits and defaults: */
+struct files_stat_struct {
+ unsigned long nr_files; /* read only */
+ unsigned long nr_free_files; /* read only */
+ unsigned long max_files; /* tunable */
+};
+
+struct inodes_stat_t {
+ long nr_inodes;
+ long nr_unused;
+ long dummy[5]; /* padding for sysctl ABI compatibility */
+};
+
+
+#define NR_FILE 8192 /* this can well be larger on a larger system */
+
+/*
+ * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR.
+ */
+struct fsxattr {
+ __u32 fsx_xflags; /* xflags field value (get/set) */
+ __u32 fsx_extsize; /* extsize field value (get/set)*/
+ __u32 fsx_nextents; /* nextents field value (get) */
+ __u32 fsx_projid; /* project identifier (get/set) */
+ __u32 fsx_cowextsize; /* CoW extsize field value (get/set)*/
+ unsigned char fsx_pad[8];
+};
+
+/*
+ * Flags for the fsx_xflags field
+ */
+#define FS_XFLAG_REALTIME 0x00000001 /* data in realtime volume */
+#define FS_XFLAG_PREALLOC 0x00000002 /* preallocated file extents */
+#define FS_XFLAG_IMMUTABLE 0x00000008 /* file cannot be modified */
+#define FS_XFLAG_APPEND 0x00000010 /* all writes append */
+#define FS_XFLAG_SYNC 0x00000020 /* all writes synchronous */
+#define FS_XFLAG_NOATIME 0x00000040 /* do not update access time */
+#define FS_XFLAG_NODUMP 0x00000080 /* do not include in backups */
+#define FS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */
+#define FS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */
+#define FS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */
+#define FS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */
+#define FS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
+#define FS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */
+#define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */
+#define FS_XFLAG_DAX 0x00008000 /* use DAX for IO */
+#define FS_XFLAG_COWEXTSIZE 0x00010000 /* CoW extent size allocator hint */
+#define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
+
+/* the read-only stuff doesn't really belong here, but any other place is
+ probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
+#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */
+#define BLKRRPART _IO(0x12,95) /* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */
+#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
+#define BLKRASET _IO(0x12,98) /* set read ahead for block device */
+#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
+#define BLKFRASET _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */
+#define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */
+#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */
+#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
+#define BLKSSZGET _IO(0x12,104)/* get block device sector size */
+#if 0
+#define BLKPG _IO(0x12,105)/* See blkpg.h */
+
+/* Some people are morons. Do not use sizeof! */
+
+#define BLKELVGET _IOR(0x12,106,size_t)/* elevator get */
+#define BLKELVSET _IOW(0x12,107,size_t)/* elevator set */
+/* This was here just to show that the number is taken -
+ probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */
+#endif
+/* A jump here: 108-111 have been used for various private purposes. */
+#define BLKBSZGET _IOR(0x12,112,size_t)
+#define BLKBSZSET _IOW(0x12,113,size_t)
+#define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */
+#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKTRACESTART _IO(0x12,116)
+#define BLKTRACESTOP _IO(0x12,117)
+#define BLKTRACETEARDOWN _IO(0x12,118)
+#define BLKDISCARD _IO(0x12,119)
+#define BLKIOMIN _IO(0x12,120)
+#define BLKIOOPT _IO(0x12,121)
+#define BLKALIGNOFF _IO(0x12,122)
+#define BLKPBSZGET _IO(0x12,123)
+#define BLKDISCARDZEROES _IO(0x12,124)
+#define BLKSECDISCARD _IO(0x12,125)
+#define BLKROTATIONAL _IO(0x12,126)
+#define BLKZEROOUT _IO(0x12,127)
+#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
+/*
+ * A jump here: 130-136 are reserved for zoned block devices
+ * (see uapi/linux/blkzoned.h)
+ */
+
+#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
+#define FIBMAP _IO(0x00,1) /* bmap access */
+#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
+#define FIFREEZE _IOWR('X', 119, int) /* Freeze */
+#define FITHAW _IOWR('X', 120, int) /* Thaw */
+#define FITRIM _IOWR('X', 121, struct fstrim_range) /* Trim */
+#define FICLONE _IOW(0x94, 9, int)
+#define FICLONERANGE _IOW(0x94, 13, struct file_clone_range)
+#define FIDEDUPERANGE _IOWR(0x94, 54, struct file_dedupe_range)
+
+#define FSLABEL_MAX 256 /* Max chars for the interface; each fs may differ */
+
+#define FS_IOC_GETFLAGS _IOR('f', 1, long)
+#define FS_IOC_SETFLAGS _IOW('f', 2, long)
+#define FS_IOC_GETVERSION _IOR('v', 1, long)
+#define FS_IOC_SETVERSION _IOW('v', 2, long)
+#define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap)
+#define FS_IOC32_GETFLAGS _IOR('f', 1, int)
+#define FS_IOC32_SETFLAGS _IOW('f', 2, int)
+#define FS_IOC32_GETVERSION _IOR('v', 1, int)
+#define FS_IOC32_SETVERSION _IOW('v', 2, int)
+#define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr)
+#define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
+#define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX])
+#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX])
+/* Returns the external filesystem UUID, the same one blkid returns */
+#define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2)
+/*
+ * Returns the path component under /sys/fs/ that refers to this filesystem;
+ * also /sys/kernel/debug/ for filesystems with debugfs exports
+ */
+#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path)
+
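A minimal userspace sketch for the two ioctls above (fd is assumed to be an open descriptor on a file in the filesystem of interest; not part of the header):

    struct fsuuid2 u = { 0 };
    struct fs_sysfs_path p = { 0 };

    if (ioctl(fd, FS_IOC_GETFSUUID, &u) == 0) {
            /* u.len gives the UUID length, u.uuid holds the raw bytes */
    }
    if (ioctl(fd, FS_IOC_GETFSSYSFSPATH, &p) == 0) {
            /* p.name is the component under /sys/fs/ for this filesystem */
    }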
+/*
+ * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
+ *
+ * Note: for historical reasons, these flags were originally used and
+ * defined for use by ext2/ext3, and then other file systems started
+ * using these flags so they wouldn't need to write their own version
+ * of chattr/lsattr (which was shipped as part of e2fsprogs). You
+ * should think twice before trying to use these flags in new
+ * contexts, or trying to assign these flags, since they are used both
+ * as the UAPI and the on-disk encoding for ext2/3/4. Also, we are
+ * almost out of 32-bit flags. :-)
+ *
+ * We have recently hoisted FS_IOC_FSGETXATTR / FS_IOC_FSSETXATTR from
+ * XFS to the generic FS level interface. This uses a structure that
+ * has padding and hence has more room to grow, so it may be more
+ * appropriate for many new use cases.
+ *
+ * Please do not change these flags or interfaces before checking with
+ * linux-fsdevel@vger.kernel.org and linux-api@vger.kernel.org.
+ */
+#define FS_SECRM_FL 0x00000001 /* Secure deletion */
+#define FS_UNRM_FL 0x00000002 /* Undelete */
+#define FS_COMPR_FL 0x00000004 /* Compress file */
+#define FS_SYNC_FL 0x00000008 /* Synchronous updates */
+#define FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
+#define FS_APPEND_FL 0x00000020 /* writes to file may only append */
+#define FS_NODUMP_FL 0x00000040 /* do not dump file */
+#define FS_NOATIME_FL 0x00000080 /* do not update atime */
+/* Reserved for compression usage... */
+#define FS_DIRTY_FL 0x00000100
+#define FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */
+#define FS_NOCOMP_FL 0x00000400 /* Don't compress */
+/* End compression flags --- maybe not all used */
+#define FS_ENCRYPT_FL 0x00000800 /* Encrypted file */
+#define FS_BTREE_FL 0x00001000 /* btree format dir */
+#define FS_INDEX_FL 0x00001000 /* hash-indexed directory */
+#define FS_IMAGIC_FL 0x00002000 /* AFS directory */
+#define FS_JOURNAL_DATA_FL 0x00004000 /* Reserved for ext3 */
+#define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
+#define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
+#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
+#define FS_HUGE_FILE_FL 0x00040000 /* Reserved for ext4 */
+#define FS_EXTENT_FL 0x00080000 /* Extents */
+#define FS_VERITY_FL 0x00100000 /* Verity protected inode */
+#define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */
+#define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */
+#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
+#define FS_DAX_FL 0x02000000 /* Inode is DAX */
+#define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */
+#define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
+#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */
+#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
+
+#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
+#define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
+
+
+#define SYNC_FILE_RANGE_WAIT_BEFORE 1
+#define SYNC_FILE_RANGE_WRITE 2
+#define SYNC_FILE_RANGE_WAIT_AFTER 4
+#define SYNC_FILE_RANGE_WRITE_AND_WAIT (SYNC_FILE_RANGE_WRITE | \
+ SYNC_FILE_RANGE_WAIT_BEFORE | \
+ SYNC_FILE_RANGE_WAIT_AFTER)
+
+/*
+ * Flags for preadv2/pwritev2:
+ */
+
+typedef int __bitwise __kernel_rwf_t;
+
+/* high priority request, poll if possible */
+#define RWF_HIPRI ((__force __kernel_rwf_t)0x00000001)
+
+/* per-IO O_DSYNC */
+#define RWF_DSYNC ((__force __kernel_rwf_t)0x00000002)
+
+/* per-IO O_SYNC */
+#define RWF_SYNC ((__force __kernel_rwf_t)0x00000004)
+
+/* per-IO, return -EAGAIN if operation would block */
+#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008)
+
+/* per-IO O_APPEND */
+#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010)
+
+/* per-IO negation of O_APPEND */
+#define RWF_NOAPPEND ((__force __kernel_rwf_t)0x00000020)
+
+/* mask of flags supported by the kernel */
+#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+ RWF_APPEND | RWF_NOAPPEND)
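As an illustration (not part of this patch), a small sketch of passing a per-IO flag to preadv2(); RWF_NOWAIT makes the read fail with EAGAIN rather than block. The file path is hypothetical, and preadv2() is assumed to be available from glibc under _GNU_SOURCE:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("datafile", O_RDONLY);	/* hypothetical path */
	ssize_t n;

	if (fd < 0)
		return 1;
	/* Return immediately with EAGAIN instead of blocking on I/O. */
	n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		printf("read would block\n");
	close(fd);
	return 0;
}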
+
+#define PROCFS_IOCTL_MAGIC 'f'
+
+/* Pagemap ioctl */
+#define PAGEMAP_SCAN _IOWR(PROCFS_IOCTL_MAGIC, 16, struct pm_scan_arg)
+
+/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. */
+#define PAGE_IS_WPALLOWED (1 << 0)
+#define PAGE_IS_WRITTEN (1 << 1)
+#define PAGE_IS_FILE (1 << 2)
+#define PAGE_IS_PRESENT (1 << 3)
+#define PAGE_IS_SWAPPED (1 << 4)
+#define PAGE_IS_PFNZERO (1 << 5)
+#define PAGE_IS_HUGE (1 << 6)
+#define PAGE_IS_SOFT_DIRTY (1 << 7)
+
+/*
+ * struct page_region - Page region with flags
+ * @start: Start of the region
+ * @end: End of the region (exclusive)
+ * @categories: PAGE_IS_* category bitmask for the region
+ */
+struct page_region {
+ __u64 start;
+ __u64 end;
+ __u64 categories;
+};
+
+/* Flags for PAGEMAP_SCAN ioctl */
+#define PM_SCAN_WP_MATCHING (1 << 0) /* Write protect the pages matched. */
+#define PM_SCAN_CHECK_WPASYNC (1 << 1) /* Abort the scan when a non-WP-enabled page is found. */
+
+/*
+ * struct pm_scan_arg - Pagemap ioctl argument
+ * @size: Size of the structure
+ * @flags: Flags for the IOCTL
+ * @start: Starting address of the region
+ * @end: Ending address of the region
+ * @walk_end: Address where the scan stopped (written by the kernel).
+ * walk_end == end (with address tags cleared) indicates that the scan completed over the entire range.
+ * @vec: Address of page_region struct array for output
+ * @vec_len: Length of the page_region struct array
+ * @max_pages: Optional limit for number of returned pages (0 = disabled)
+ * @category_inverted: PAGE_IS_* categories whose values match if 0 instead of 1
+ * @category_mask: Skip pages for which any category doesn't match
+ * @category_anyof_mask: Skip pages for which no category matches
+ * @return_mask: PAGE_IS_* categories that are to be reported in `page_region`s returned
+ */
+struct pm_scan_arg {
+ __u64 size;
+ __u64 flags;
+ __u64 start;
+ __u64 end;
+ __u64 walk_end;
+ __u64 vec;
+ __u64 vec_len;
+ __u64 max_pages;
+ __u64 category_inverted;
+ __u64 category_mask;
+ __u64 category_anyof_mask;
+ __u64 return_mask;
+};
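A minimal sketch (not part of this patch) of driving PAGEMAP_SCAN from userspace, assuming the ioctl returns the number of page_region entries it filled; the scanned range is an anonymous mapping created just for the example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fs.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *area = mmap(NULL, 16 * psz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct page_region regions[16];
	struct pm_scan_arg arg = {
		.size = sizeof(arg),
		.start = (unsigned long)area,
		.end = (unsigned long)area + 16 * psz,
		.vec = (unsigned long)regions,
		.vec_len = 16,
		.category_mask = PAGE_IS_PRESENT,
		.return_mask = PAGE_IS_PRESENT | PAGE_IS_WRITTEN,
	};
	int fd, n, i;

	if (area == MAP_FAILED)
		return 1;
	memset(area, 1, 4 * psz);		/* fault in a few pages */
	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 1;
	n = ioctl(fd, PAGEMAP_SCAN, &arg);	/* assumed: count of filled regions */
	for (i = 0; i < n; i++)
		printf("%llx-%llx categories %llx\n",
		       (unsigned long long)regions[i].start,
		       (unsigned long long)regions[i].end,
		       (unsigned long long)regions[i].categories);
	close(fd);
	return 0;
}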
+
+/* /proc/<pid>/maps ioctl */
+#define PROCMAP_QUERY _IOWR(PROCFS_IOCTL_MAGIC, 17, struct procmap_query)
+
+enum procmap_query_flags {
+ /*
+ * VMA permission flags.
+ *
+ * Can be used as part of procmap_query.query_flags field to look up
+ * only VMAs satisfying specified subset of permissions. E.g., specifying
+ * PROCMAP_QUERY_VMA_READABLE only will return both readable and read/write VMAs,
+ * while having PROCMAP_QUERY_VMA_READABLE | PROCMAP_QUERY_VMA_WRITABLE will only
+ * return read/write VMAs, though both executable/non-executable and
+ * private/shared will be ignored.
+ *
+ * PROCMAP_QUERY_VMA_* flags are also returned in procmap_query.vma_flags
+ * field to specify actual VMA permissions.
+ */
+ PROCMAP_QUERY_VMA_READABLE = 0x01,
+ PROCMAP_QUERY_VMA_WRITABLE = 0x02,
+ PROCMAP_QUERY_VMA_EXECUTABLE = 0x04,
+ PROCMAP_QUERY_VMA_SHARED = 0x08,
+ /*
+ * Query modifier flags.
+ *
+ * By default, the VMA that covers the provided address is returned,
+ * or -ENOENT if none exists. With PROCMAP_QUERY_COVERING_OR_NEXT_VMA flag set, the closest
+ * VMA with vma_start > addr will be returned if no covering VMA is
+ * found.
+ *
+ * PROCMAP_QUERY_FILE_BACKED_VMA instructs query to consider only VMAs that
+ * have file backing. Can be combined with PROCMAP_QUERY_COVERING_OR_NEXT_VMA
+ * to iterate all VMAs with file backing.
+ */
+ PROCMAP_QUERY_COVERING_OR_NEXT_VMA = 0x10,
+ PROCMAP_QUERY_FILE_BACKED_VMA = 0x20,
+};
+
+/*
+ * Input/output argument structure passed into the ioctl() call. It can be used
+ * to query a set of VMAs (Virtual Memory Areas) of a process.
+ *
+ * Each field can be one of three kinds, marked in a short comment to the
+ * right of the field:
+ * - "in", input argument, user has to provide this value, kernel doesn't modify it;
+ * - "out", output argument, kernel sets this field with VMA data;
+ * - "in/out", input and output argument; user provides initial value (used
+ * to specify maximum allowable buffer size), and kernel sets it to actual
+ * amount of data written (or zero, if there is no data).
+ *
+ * If a matching VMA is found (according to the criteria specified by
+ * query_addr/query_flags), all the out fields are filled out, and ioctl()
+ * returns 0. If there is no matching VMA, -ENOENT will be returned.
+ * In case of any other error, negative error code other than -ENOENT is
+ * returned.
+ *
+ * Most of the data is similar to what is returned as text in the /proc/<pid>/maps
+ * file, but procmap_query provides more querying flexibility. There are no
+ * consistency guarantees between subsequent ioctl() calls, but data returned
+ * for matched VMA is self-consistent.
+ */
+struct procmap_query {
+ /* Query struct size, for backwards/forward compatibility */
+ __u64 size;
+ /*
+ * Query flags, a combination of enum procmap_query_flags values.
+ * Defines query filtering and behavior, see enum procmap_query_flags.
+ *
+ * Input argument, provided by user. Kernel doesn't modify it.
+ */
+ __u64 query_flags; /* in */
+ /*
+ * Query address. By default, VMA that covers this address will
+ * be looked up. PROCMAP_QUERY_* flags above modify this default
+ * behavior further.
+ *
+ * Input argument, provided by user. Kernel doesn't modify it.
+ */
+ __u64 query_addr; /* in */
+ /* VMA starting (inclusive) and ending (exclusive) address, if VMA is found. */
+ __u64 vma_start; /* out */
+ __u64 vma_end; /* out */
+ /* VMA permissions flags. A combination of PROCMAP_QUERY_VMA_* flags. */
+ __u64 vma_flags; /* out */
+ /* VMA backing page size granularity. */
+ __u64 vma_page_size; /* out */
+ /*
+ * VMA file offset. If VMA has file backing, this specifies offset
+ * within the file that VMA's start address corresponds to.
+ * Is set to zero if VMA has no backing file.
+ */
+ __u64 vma_offset; /* out */
+ /* Backing file's inode number, or zero, if VMA has no backing file. */
+ __u64 inode; /* out */
+ /* Backing file's device major/minor number, or zero, if VMA has no backing file. */
+ __u32 dev_major; /* out */
+ __u32 dev_minor; /* out */
+ /*
+ * If set to non-zero value, signals the request to return VMA name
+ * (i.e., VMA's backing file's absolute path, with " (deleted)" suffix
+ * appended, if file was unlinked from FS) for matched VMA. VMA name
+ * can also be some special name (e.g., "[heap]", "[stack]") or could
+ * be even user-supplied with prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME).
+ *
+ * Kernel will set this field to zero, if VMA has no associated name.
+ * Otherwise kernel will return actual amount of bytes filled in
+ * user-supplied buffer (see vma_name_addr field below), including the
+ * terminating zero.
+ *
+ * If the VMA name is longer than the user-supplied maximum buffer size,
+ * -E2BIG error is returned.
+ *
+ * If this field is set to non-zero value, vma_name_addr should point
+ * to valid user space memory buffer of at least vma_name_size bytes.
+ * If set to zero, vma_name_addr should be set to zero as well.
+ */
+ __u32 vma_name_size; /* in/out */
+ /*
+ * If set to non-zero value, signals the request to extract and return
+ * VMA's backing file's build ID, if the backing file is an ELF file
+ * and it contains embedded build ID.
+ *
+ * Kernel will set this field to zero, if VMA has no backing file,
+ * backing file is not an ELF file, or ELF file has no build ID
+ * embedded.
+ *
+ * Build ID is a binary value (not a string). Kernel will set
+ * build_id_size field to exact number of bytes used for build ID.
+ * If build ID is requested and present, but needs more bytes than
+ * user-supplied maximum buffer size (see build_id_addr field below),
+ * -E2BIG error will be returned.
+ *
+ * If this field is set to non-zero value, build_id_addr should point
+ * to valid user space memory buffer of at least build_id_size bytes.
+ * If set to zero, build_id_addr should be set to zero as well.
+ */
+ __u32 build_id_size; /* in/out */
+ /*
+ * User-supplied address of a buffer of at least vma_name_size bytes
+ * for kernel to fill with matched VMA's name (see vma_name_size field
+ * description above for details).
+ *
+ * Should be set to zero if VMA name should not be returned.
+ */
+ __u64 vma_name_addr; /* in */
+ /*
+ * User-supplied address of a buffer of at least build_id_size bytes
+ * for kernel to fill with matched VMA's ELF build ID, if available
+ * (see build_id_size field description above for details).
+ *
+ * Should be set to zero if build ID should not be returned.
+ */
+ __u64 build_id_addr; /* in */
+};
+
+#endif /* _UAPI_LINUX_FS_H */
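For illustration (not part of this patch), a sketch of querying the executable VMA that contains a known address in the current process; the buffer size and the choice of address are arbitrary:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	char name[256];
	struct procmap_query q;
	int fd = open("/proc/self/maps", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&q, 0, sizeof(q));
	q.size = sizeof(q);
	q.query_flags = PROCMAP_QUERY_VMA_EXECUTABLE;
	q.query_addr = (unsigned long)&main;		/* VMA holding main() */
	q.vma_name_addr = (unsigned long)name;
	q.vma_name_size = sizeof(name);
	if (ioctl(fd, PROCMAP_QUERY, &q) == 0)
		printf("%llx-%llx %s\n",
		       (unsigned long long)q.vma_start,
		       (unsigned long long)q.vma_end,
		       q.vma_name_size ? name : "<unnamed>");
	close(fd);
	return 0;
}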
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index d03842abae57..e5af8c692dc0 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -917,6 +917,7 @@ struct kvm_enable_cap {
#define KVM_CAP_MEMORY_ATTRIBUTES 233
#define KVM_CAP_GUEST_MEMFD 234
#define KVM_CAP_VM_TYPES 235
+#define KVM_CAP_PRE_FAULT_MEMORY 236
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1548,4 +1549,13 @@ struct kvm_create_guest_memfd {
__u64 reserved[6];
};
+#define KVM_PRE_FAULT_MEMORY _IOWR(KVMIO, 0xd5, struct kvm_pre_fault_memory)
+
+struct kvm_pre_fault_memory {
+ __u64 gpa;
+ __u64 size;
+ __u64 flags;
+ __u64 padding[5];
+};
+
#endif /* __LINUX_KVM_H */
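A hedged sketch (not part of this patch) of how the new vCPU ioctl might be driven; prefault_guest_range() and the vcpu_fd it takes are hypothetical, error handling is simplified, and the loop assumes gpa/size are advanced by the kernel on partial progress:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vcpu_fd: descriptor obtained earlier via KVM_CREATE_VCPU (not shown). */
static int prefault_guest_range(int vcpu_fd, __u64 gpa, __u64 size)
{
	struct kvm_pre_fault_memory range = {
		.gpa = gpa,
		.size = size,
	};

	while (range.size) {
		/* Assumed: on partial progress the kernel advances gpa and shrinks size. */
		if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) < 0)
			return -1;
	}
	return 0;
}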
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
new file mode 100644
index 000000000000..35791791a879
--- /dev/null
+++ b/tools/include/uapi/linux/prctl.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_PRCTL_H
+#define _LINUX_PRCTL_H
+
+#include <linux/types.h>
+
+/* Values to pass as first argument to prctl() */
+
+#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */
+#define PR_GET_PDEATHSIG 2 /* Second arg is a ptr to return the signal */
+
+/* Get/set current->mm->dumpable */
+#define PR_GET_DUMPABLE 3
+#define PR_SET_DUMPABLE 4
+
+/* Get/set unaligned access control bits (if meaningful) */
+#define PR_GET_UNALIGN 5
+#define PR_SET_UNALIGN 6
+# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */
+# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */
+
+/* Get/set whether or not to drop capabilities on setuid() away from
+ * uid 0 (as per security/commoncap.c) */
+#define PR_GET_KEEPCAPS 7
+#define PR_SET_KEEPCAPS 8
+
+/* Get/set floating-point emulation control bits (if meaningful) */
+#define PR_GET_FPEMU 9
+#define PR_SET_FPEMU 10
+# define PR_FPEMU_NOPRINT 1 /* silently emulate fp operations accesses */
+# define PR_FPEMU_SIGFPE 2 /* don't emulate fp operations, send SIGFPE instead */
+
+/* Get/set floating-point exception mode (if meaningful) */
+#define PR_GET_FPEXC 11
+#define PR_SET_FPEXC 12
+# define PR_FP_EXC_SW_ENABLE 0x80 /* Use FPEXC for FP exception enables */
+# define PR_FP_EXC_DIV 0x010000 /* floating point divide by zero */
+# define PR_FP_EXC_OVF 0x020000 /* floating point overflow */
+# define PR_FP_EXC_UND 0x040000 /* floating point underflow */
+# define PR_FP_EXC_RES 0x080000 /* floating point inexact result */
+# define PR_FP_EXC_INV 0x100000 /* floating point invalid operation */
+# define PR_FP_EXC_DISABLED 0 /* FP exceptions disabled */
+# define PR_FP_EXC_NONRECOV 1 /* async non-recoverable exc. mode */
+# define PR_FP_EXC_ASYNC 2 /* async recoverable exception mode */
+# define PR_FP_EXC_PRECISE 3 /* precise exception mode */
+
+/* Get/set whether we use statistical process timing or accurate timestamp
+ * based process timing */
+#define PR_GET_TIMING 13
+#define PR_SET_TIMING 14
+# define PR_TIMING_STATISTICAL 0 /* Normal, traditional,
+ statistical process timing */
+# define PR_TIMING_TIMESTAMP 1 /* Accurate timestamp based
+ process timing */
+
+#define PR_SET_NAME 15 /* Set process name */
+#define PR_GET_NAME 16 /* Get process name */
+
+/* Get/set process endian */
+#define PR_GET_ENDIAN 19
+#define PR_SET_ENDIAN 20
+# define PR_ENDIAN_BIG 0
+# define PR_ENDIAN_LITTLE 1 /* True little endian mode */
+# define PR_ENDIAN_PPC_LITTLE 2 /* "PowerPC" pseudo little endian */
+
+/* Get/set process seccomp mode */
+#define PR_GET_SECCOMP 21
+#define PR_SET_SECCOMP 22
+
+/* Get/set the capability bounding set (as per security/commoncap.c) */
+#define PR_CAPBSET_READ 23
+#define PR_CAPBSET_DROP 24
+
+/* Get/set the process' ability to use the timestamp counter instruction */
+#define PR_GET_TSC 25
+#define PR_SET_TSC 26
+# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
+# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
+
+/* Get/set securebits (as per security/commoncap.c) */
+#define PR_GET_SECUREBITS 27
+#define PR_SET_SECUREBITS 28
+
+/*
+ * Get/set the timerslack as used by poll/select/nanosleep
+ * A value of 0 means "use default"
+ */
+#define PR_SET_TIMERSLACK 29
+#define PR_GET_TIMERSLACK 30
+
+#define PR_TASK_PERF_EVENTS_DISABLE 31
+#define PR_TASK_PERF_EVENTS_ENABLE 32
+
+/*
+ * Set early/late kill mode for hwpoison memory corruption.
+ * This influences when the process gets killed on a memory corruption.
+ */
+#define PR_MCE_KILL 33
+# define PR_MCE_KILL_CLEAR 0
+# define PR_MCE_KILL_SET 1
+
+# define PR_MCE_KILL_LATE 0
+# define PR_MCE_KILL_EARLY 1
+# define PR_MCE_KILL_DEFAULT 2
+
+#define PR_MCE_KILL_GET 34
+
+/*
+ * Tune up process memory map specifics.
+ */
+#define PR_SET_MM 35
+# define PR_SET_MM_START_CODE 1
+# define PR_SET_MM_END_CODE 2
+# define PR_SET_MM_START_DATA 3
+# define PR_SET_MM_END_DATA 4
+# define PR_SET_MM_START_STACK 5
+# define PR_SET_MM_START_BRK 6
+# define PR_SET_MM_BRK 7
+# define PR_SET_MM_ARG_START 8
+# define PR_SET_MM_ARG_END 9
+# define PR_SET_MM_ENV_START 10
+# define PR_SET_MM_ENV_END 11
+# define PR_SET_MM_AUXV 12
+# define PR_SET_MM_EXE_FILE 13
+# define PR_SET_MM_MAP 14
+# define PR_SET_MM_MAP_SIZE 15
+
+/*
+ * This structure provides a new memory descriptor
+ * map which mostly modifies /proc/pid/stat[m]
+ * output for a task. This is mostly done for the
+ * sake of checkpoint/restore functionality.
+ */
+struct prctl_mm_map {
+ __u64 start_code; /* code section bounds */
+ __u64 end_code;
+ __u64 start_data; /* data section bounds */
+ __u64 end_data;
+ __u64 start_brk; /* heap for brk() syscall */
+ __u64 brk;
+ __u64 start_stack; /* stack starts at */
+ __u64 arg_start; /* command line arguments bounds */
+ __u64 arg_end;
+ __u64 env_start; /* environment variables bounds */
+ __u64 env_end;
+ __u64 *auxv; /* auxiliary vector */
+ __u32 auxv_size; /* vector size */
+ __u32 exe_fd; /* /proc/$pid/exe link file */
+};
+
+/*
+ * Set specific pid that is allowed to ptrace the current task.
+ * A value of 0 means "no process".
+ */
+#define PR_SET_PTRACER 0x59616d61
+# define PR_SET_PTRACER_ANY ((unsigned long)-1)
+
+#define PR_SET_CHILD_SUBREAPER 36
+#define PR_GET_CHILD_SUBREAPER 37
+
+/*
+ * If no_new_privs is set, then operations that grant new privileges (i.e.
+ * execve) will either fail or not grant them. This affects suid/sgid,
+ * file capabilities, and LSMs.
+ *
+ * Operations that merely manipulate or drop existing privileges (setresuid,
+ * capset, etc.) will still work. Drop those privileges if you want them gone.
+ *
+ * Changing LSM security domain is considered a new privilege. So, for example,
+ * asking selinux for a specific new context (e.g. with runcon) will result
+ * in execve returning -EPERM.
+ *
+ * See Documentation/userspace-api/no_new_privs.rst for more details.
+ */
+#define PR_SET_NO_NEW_PRIVS 38
+#define PR_GET_NO_NEW_PRIVS 39
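For illustration (not part of this patch), a minimal sketch of setting the flag before, for example, installing an unprivileged seccomp filter:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Once set, no_new_privs cannot be unset for this task or its children. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		perror("PR_SET_NO_NEW_PRIVS");
		return 1;
	}
	printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
	return 0;
}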
+
+#define PR_GET_TID_ADDRESS 40
+
+#define PR_SET_THP_DISABLE 41
+#define PR_GET_THP_DISABLE 42
+
+/*
+ * No longer implemented, but left here to ensure the numbers stay reserved:
+ */
+#define PR_MPX_ENABLE_MANAGEMENT 43
+#define PR_MPX_DISABLE_MANAGEMENT 44
+
+#define PR_SET_FP_MODE 45
+#define PR_GET_FP_MODE 46
+# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
+# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
+
+/* Control the ambient capability set */
+#define PR_CAP_AMBIENT 47
+# define PR_CAP_AMBIENT_IS_SET 1
+# define PR_CAP_AMBIENT_RAISE 2
+# define PR_CAP_AMBIENT_LOWER 3
+# define PR_CAP_AMBIENT_CLEAR_ALL 4
+
+/* arm64 Scalable Vector Extension controls */
+/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
+#define PR_SVE_SET_VL 50 /* set task vector length */
+# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SVE_GET_VL 51 /* get task vector length */
+/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
+# define PR_SVE_VL_LEN_MASK 0xffff
+# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
+
+/* Per task speculation control */
+#define PR_GET_SPECULATION_CTRL 52
+#define PR_SET_SPECULATION_CTRL 53
+/* Speculation control variants */
+# define PR_SPEC_STORE_BYPASS 0
+# define PR_SPEC_INDIRECT_BRANCH 1
+# define PR_SPEC_L1D_FLUSH 2
+/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
+# define PR_SPEC_NOT_AFFECTED 0
+# define PR_SPEC_PRCTL (1UL << 0)
+# define PR_SPEC_ENABLE (1UL << 1)
+# define PR_SPEC_DISABLE (1UL << 2)
+# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+# define PR_SPEC_DISABLE_NOEXEC (1UL << 4)
+
+/* Reset arm64 pointer authentication keys */
+#define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1UL << 0)
+# define PR_PAC_APIBKEY (1UL << 1)
+# define PR_PAC_APDAKEY (1UL << 2)
+# define PR_PAC_APDBKEY (1UL << 3)
+# define PR_PAC_APGAKEY (1UL << 4)
+
+/* Tagged user address controls for arm64 */
+#define PR_SET_TAGGED_ADDR_CTRL 55
+#define PR_GET_TAGGED_ADDR_CTRL 56
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+/* MTE tag check fault modes */
+# define PR_MTE_TCF_NONE 0UL
+# define PR_MTE_TCF_SYNC (1UL << 1)
+# define PR_MTE_TCF_ASYNC (1UL << 2)
+# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
+/* MTE tag inclusion mask */
+# define PR_MTE_TAG_SHIFT 3
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+/* Unused; kept only for source compatibility */
+# define PR_MTE_TCF_SHIFT 1
+
+/* Control reclaim behavior when allocating memory */
+#define PR_SET_IO_FLUSHER 57
+#define PR_GET_IO_FLUSHER 58
+
+/* Dispatch syscalls to a userspace handler */
+#define PR_SET_SYSCALL_USER_DISPATCH 59
+# define PR_SYS_DISPATCH_OFF 0
+# define PR_SYS_DISPATCH_ON 1
+/* The control values for the user space selector when dispatch is enabled */
+# define SYSCALL_DISPATCH_FILTER_ALLOW 0
+# define SYSCALL_DISPATCH_FILTER_BLOCK 1
+
+/* Set/get enabled arm64 pointer authentication keys */
+#define PR_PAC_SET_ENABLED_KEYS 60
+#define PR_PAC_GET_ENABLED_KEYS 61
+
+/* Request the scheduler to share a core */
+#define PR_SCHED_CORE 62
+# define PR_SCHED_CORE_GET 0
+# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
+# define PR_SCHED_CORE_MAX 4
+# define PR_SCHED_CORE_SCOPE_THREAD 0
+# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
+# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+
+/* arm64 Scalable Matrix Extension controls */
+/* Flag values must be in sync with SVE versions */
+#define PR_SME_SET_VL 63 /* set task vector length */
+# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SME_GET_VL 64 /* get task vector length */
+/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */
+# define PR_SME_VL_LEN_MASK 0xffff
+# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */
+
+/* Memory deny write / execute */
+#define PR_SET_MDWE 65
+# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
+# define PR_MDWE_NO_INHERIT (1UL << 1)
+
+#define PR_GET_MDWE 66
+
+#define PR_SET_VMA 0x53564d41
+# define PR_SET_VMA_ANON_NAME 0
+
+#define PR_GET_AUXV 0x41555856
+
+#define PR_SET_MEMORY_MERGE 67
+#define PR_GET_MEMORY_MERGE 68
+
+#define PR_RISCV_V_SET_CONTROL 69
+#define PR_RISCV_V_GET_CONTROL 70
+# define PR_RISCV_V_VSTATE_CTRL_DEFAULT 0
+# define PR_RISCV_V_VSTATE_CTRL_OFF 1
+# define PR_RISCV_V_VSTATE_CTRL_ON 2
+# define PR_RISCV_V_VSTATE_CTRL_INHERIT (1 << 4)
+# define PR_RISCV_V_VSTATE_CTRL_CUR_MASK 0x3
+# define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK 0xc
+# define PR_RISCV_V_VSTATE_CTRL_MASK 0x1f
+
+#define PR_RISCV_SET_ICACHE_FLUSH_CTX 71
+# define PR_RISCV_CTX_SW_FENCEI_ON 0
+# define PR_RISCV_CTX_SW_FENCEI_OFF 1
+# define PR_RISCV_SCOPE_PER_PROCESS 0
+# define PR_RISCV_SCOPE_PER_THREAD 1
+
+/* PowerPC Dynamic Execution Control Register (DEXCR) controls */
+#define PR_PPC_GET_DEXCR 72
+#define PR_PPC_SET_DEXCR 73
+/* DEXCR aspect to act on */
+# define PR_PPC_DEXCR_SBHE 0 /* Speculative branch hint enable */
+# define PR_PPC_DEXCR_IBRTPD 1 /* Indirect branch recurrent target prediction disable */
+# define PR_PPC_DEXCR_SRAPD 2 /* Subroutine return address prediction disable */
+# define PR_PPC_DEXCR_NPHIE 3 /* Non-privileged hash instruction enable */
+/* Action to apply / return */
+# define PR_PPC_DEXCR_CTRL_EDITABLE 0x1 /* Aspect can be modified with PR_PPC_SET_DEXCR */
+# define PR_PPC_DEXCR_CTRL_SET 0x2 /* Set the aspect for this process */
+# define PR_PPC_DEXCR_CTRL_CLEAR 0x4 /* Clear the aspect for this process */
+# define PR_PPC_DEXCR_CTRL_SET_ONEXEC 0x8 /* Set the aspect on exec */
+# define PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC 0x10 /* Clear the aspect on exec */
+# define PR_PPC_DEXCR_CTRL_MASK 0x1f
+
+#endif /* _LINUX_PRCTL_H */
diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
index 84adf8102018..d3eb04d1bc89 100644
--- a/tools/lib/api/io.h
+++ b/tools/lib/api/io.h
@@ -43,48 +43,55 @@ static inline void io__init(struct io *io, int fd,
io->eof = false;
}
-/* Reads one character from the "io" file with similar semantics to fgetc. */
-static inline int io__get_char(struct io *io)
+/* Read from fd filling the buffer. Called when io->data == io->end. */
+static inline int io__fill_buffer(struct io *io)
{
- char *ptr = io->data;
+ ssize_t n;
if (io->eof)
return -1;
- if (ptr == io->end) {
- ssize_t n;
-
- if (io->timeout_ms != 0) {
- struct pollfd pfds[] = {
- {
- .fd = io->fd,
- .events = POLLIN,
- },
- };
-
- n = poll(pfds, 1, io->timeout_ms);
- if (n == 0)
- errno = ETIMEDOUT;
- if (n > 0 && !(pfds[0].revents & POLLIN)) {
- errno = EIO;
- n = -1;
- }
- if (n <= 0) {
- io->eof = true;
- return -1;
- }
+ if (io->timeout_ms != 0) {
+ struct pollfd pfds[] = {
+ {
+ .fd = io->fd,
+ .events = POLLIN,
+ },
+ };
+
+ n = poll(pfds, 1, io->timeout_ms);
+ if (n == 0)
+ errno = ETIMEDOUT;
+ if (n > 0 && !(pfds[0].revents & POLLIN)) {
+ errno = EIO;
+ n = -1;
}
- n = read(io->fd, io->buf, io->buf_len);
-
if (n <= 0) {
io->eof = true;
return -1;
}
- ptr = &io->buf[0];
- io->end = &io->buf[n];
}
- io->data = ptr + 1;
- return *ptr;
+ n = read(io->fd, io->buf, io->buf_len);
+
+ if (n <= 0) {
+ io->eof = true;
+ return -1;
+ }
+ io->data = &io->buf[0];
+ io->end = &io->buf[n];
+ return 0;
+}
+
+/* Reads one character from the "io" file with similar semantics to fgetc. */
+static inline int io__get_char(struct io *io)
+{
+ if (io->data == io->end) {
+ int ret = io__fill_buffer(io);
+
+ if (ret)
+ return ret;
+ }
+ return *io->data++;
}
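A small sketch (not part of this patch) of how the refactored helpers are typically consumed: io__init() attaches a caller-owned buffer to a file descriptor and io__get_char() refills it via io__fill_buffer() as needed. The include path and the count_lines() helper are illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <api/io.h>	/* illustrative include path within tools/ */

static int count_lines(const char *path)
{
	char buf[128];
	struct io io;
	int c, lines = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	io__init(&io, fd, buf, sizeof(buf));
	while ((c = io__get_char(&io)) != -1) {
		if (c == '\n')
			lines++;
	}
	close(fd);
	return lines;
}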
/* Read a hexadecimal value with no 0x prefix into the out argument hex. If the
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index b6619199a706..e2cd558ca0b4 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,4 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
- usdt.o zip.o elf.o features.o
+ usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 2d0840ef599a..32c00db3b91b 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -116,6 +116,9 @@ struct btf {
/* whether strings are already deduplicated */
bool strs_deduped;
+ /* whether base_btf should be freed in btf_free for this instance */
+ bool owns_base;
+
/* BTF object FD, if loaded into kernel */
int fd;
@@ -598,7 +601,7 @@ static int btf_sanity_check(const struct btf *btf)
__u32 i, n = btf__type_cnt(btf);
int err;
- for (i = 1; i < n; i++) {
+ for (i = btf->start_id; i < n; i++) {
t = btf_type_by_id(btf, i);
err = btf_validate_type(btf, t, i);
if (err)
@@ -969,6 +972,8 @@ void btf__free(struct btf *btf)
free(btf->raw_data);
free(btf->raw_data_swapped);
free(btf->type_offs);
+ if (btf->owns_base)
+ btf__free(btf->base_btf);
free(btf);
}
@@ -1084,53 +1089,38 @@ struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
return libbpf_ptr(btf_new(data, size, base_btf));
}
-static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
- struct btf_ext **btf_ext)
+struct btf_elf_secs {
+ Elf_Data *btf_data;
+ Elf_Data *btf_ext_data;
+ Elf_Data *btf_base_data;
+};
+
+static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
{
- Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
- int err = 0, fd = -1, idx = 0;
- struct btf *btf = NULL;
Elf_Scn *scn = NULL;
- Elf *elf = NULL;
+ Elf_Data *data;
GElf_Ehdr ehdr;
size_t shstrndx;
+ int idx = 0;
- if (elf_version(EV_CURRENT) == EV_NONE) {
- pr_warn("failed to init libelf for %s\n", path);
- return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
- }
-
- fd = open(path, O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- err = -errno;
- pr_warn("failed to open %s: %s\n", path, strerror(errno));
- return ERR_PTR(err);
- }
-
- err = -LIBBPF_ERRNO__FORMAT;
-
- elf = elf_begin(fd, ELF_C_READ, NULL);
- if (!elf) {
- pr_warn("failed to open %s as ELF file\n", path);
- goto done;
- }
if (!gelf_getehdr(elf, &ehdr)) {
pr_warn("failed to get EHDR from %s\n", path);
- goto done;
+ goto err;
}
if (elf_getshdrstrndx(elf, &shstrndx)) {
pr_warn("failed to get section names section index for %s\n",
path);
- goto done;
+ goto err;
}
if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
pr_warn("failed to get e_shstrndx from %s\n", path);
- goto done;
+ goto err;
}
while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ Elf_Data **field;
GElf_Shdr sh;
char *name;
@@ -1138,42 +1128,102 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warn("failed to get section(%d) header from %s\n",
idx, path);
- goto done;
+ goto err;
}
name = elf_strptr(elf, shstrndx, sh.sh_name);
if (!name) {
pr_warn("failed to get section(%d) name from %s\n",
idx, path);
- goto done;
+ goto err;
}
- if (strcmp(name, BTF_ELF_SEC) == 0) {
- btf_data = elf_getdata(scn, 0);
- if (!btf_data) {
- pr_warn("failed to get section(%d, %s) data from %s\n",
- idx, name, path);
- goto done;
- }
- continue;
- } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
- btf_ext_data = elf_getdata(scn, 0);
- if (!btf_ext_data) {
- pr_warn("failed to get section(%d, %s) data from %s\n",
- idx, name, path);
- goto done;
- }
+
+ if (strcmp(name, BTF_ELF_SEC) == 0)
+ field = &secs->btf_data;
+ else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
+ field = &secs->btf_ext_data;
+ else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
+ field = &secs->btf_base_data;
+ else
continue;
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ pr_warn("failed to get section(%d, %s) data from %s\n",
+ idx, name, path);
+ goto err;
}
+ *field = data;
}
- if (!btf_data) {
+ return 0;
+
+err:
+ return -LIBBPF_ERRNO__FORMAT;
+}
+
+static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
+ struct btf_ext **btf_ext)
+{
+ struct btf_elf_secs secs = {};
+ struct btf *dist_base_btf = NULL;
+ struct btf *btf = NULL;
+ int err = 0, fd = -1;
+ Elf *elf = NULL;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warn("failed to init libelf for %s\n", path);
+ return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
+ }
+
+ fd = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ err = -errno;
+ pr_warn("failed to open %s: %s\n", path, strerror(errno));
+ return ERR_PTR(err);
+ }
+
+ elf = elf_begin(fd, ELF_C_READ, NULL);
+ if (!elf) {
+ pr_warn("failed to open %s as ELF file\n", path);
+ goto done;
+ }
+
+ err = btf_find_elf_sections(elf, path, &secs);
+ if (err)
+ goto done;
+
+ if (!secs.btf_data) {
pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
err = -ENODATA;
goto done;
}
- btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
- err = libbpf_get_error(btf);
- if (err)
+
+ if (secs.btf_base_data) {
+ dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
+ NULL);
+ if (IS_ERR(dist_base_btf)) {
+ err = PTR_ERR(dist_base_btf);
+ dist_base_btf = NULL;
+ goto done;
+ }
+ }
+
+ btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
+ dist_base_btf ?: base_btf);
+ if (IS_ERR(btf)) {
+ err = PTR_ERR(btf);
goto done;
+ }
+ if (dist_base_btf && base_btf) {
+ err = btf__relocate(btf, base_btf);
+ if (err)
+ goto done;
+ btf__free(dist_base_btf);
+ dist_base_btf = NULL;
+ }
+
+ if (dist_base_btf)
+ btf->owns_base = true;
switch (gelf_getclass(elf)) {
case ELFCLASS32:
@@ -1187,11 +1237,12 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
break;
}
- if (btf_ext && btf_ext_data) {
- *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
- err = libbpf_get_error(*btf_ext);
- if (err)
+ if (btf_ext && secs.btf_ext_data) {
+ *btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
+ if (IS_ERR(*btf_ext)) {
+ err = PTR_ERR(*btf_ext);
goto done;
+ }
} else if (btf_ext) {
*btf_ext = NULL;
}
@@ -1205,6 +1256,7 @@ done:
if (btf_ext)
btf_ext__free(*btf_ext);
+ btf__free(dist_base_btf);
btf__free(btf);
return ERR_PTR(err);
@@ -1739,9 +1791,8 @@ struct btf_pipe {
struct hashmap *str_off_map; /* map string offsets from src to dst */
};
-static int btf_rewrite_str(__u32 *str_off, void *ctx)
+static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
{
- struct btf_pipe *p = ctx;
long mapped_off;
int off, err;
@@ -1771,10 +1822,11 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)
return 0;
}
-int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
+static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
{
- struct btf_pipe p = { .src = src_btf, .dst = btf };
+ struct btf_field_iter it;
struct btf_type *t;
+ __u32 *str_off;
int sz, err;
sz = btf_type_size(src_type);
@@ -1782,35 +1834,33 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t
return libbpf_err(sz);
/* deconstruct BTF, if necessary, and invalidate raw_data */
- if (btf_ensure_modifiable(btf))
+ if (btf_ensure_modifiable(p->dst))
return libbpf_err(-ENOMEM);
- t = btf_add_type_mem(btf, sz);
+ t = btf_add_type_mem(p->dst, sz);
if (!t)
return libbpf_err(-ENOMEM);
memcpy(t, src_type, sz);
- err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return libbpf_err(err);
- return btf_commit_type(btf, sz);
+ while ((str_off = btf_field_iter_next(&it))) {
+ err = btf_rewrite_str(p, str_off);
+ if (err)
+ return libbpf_err(err);
+ }
+
+ return btf_commit_type(p->dst, sz);
}
-static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
+int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
- struct btf *btf = ctx;
-
- if (!*type_id) /* nothing to do for VOID references */
- return 0;
+ struct btf_pipe p = { .src = src_btf, .dst = btf };
- /* we haven't updated btf's type count yet, so
- * btf->start_id + btf->nr_types - 1 is the type ID offset we should
- * add to all newly added BTF types
- */
- *type_id += btf->start_id + btf->nr_types - 1;
- return 0;
+ return btf_add_type(&p, src_type);
}
static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
@@ -1858,6 +1908,9 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
memcpy(t, src_btf->types_data, data_sz);
for (i = 0; i < cnt; i++) {
+ struct btf_field_iter it;
+ __u32 *type_id, *str_off;
+
sz = btf_type_size(t);
if (sz < 0) {
/* unlikely, has to be corrupted src_btf */
@@ -1869,15 +1922,31 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
*off = t - btf->types_data;
/* add, dedup, and remap strings referenced by this BTF type */
- err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
goto err_out;
+ while ((str_off = btf_field_iter_next(&it))) {
+ err = btf_rewrite_str(&p, str_off);
+ if (err)
+ goto err_out;
+ }
/* remap all type IDs referenced from this BTF type */
- err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
goto err_out;
+ while ((type_id = btf_field_iter_next(&it))) {
+ if (!*type_id) /* nothing to do for VOID references */
+ continue;
+
+ /* we haven't updated btf's type count yet, so
+ * btf->start_id + btf->nr_types - 1 is the type ID offset we should
+ * add to all newly added BTF types
+ */
+ *type_id += btf->start_id + btf->nr_types - 1;
+ }
+
/* go to next type data and type offset index entry */
t += sz;
off++;
@@ -3453,11 +3522,19 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *
int i, r;
for (i = 0; i < d->btf->nr_types; i++) {
+ struct btf_field_iter it;
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
+ __u32 *str_off;
- r = btf_type_visit_str_offs(t, fn, ctx);
+ r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (r)
return r;
+
+ while ((str_off = btf_field_iter_next(&it))) {
+ r = fn(str_off, ctx);
+ if (r)
+ return r;
+ }
}
if (!d->btf_ext)
@@ -4919,10 +4996,23 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
for (i = 0; i < d->btf->nr_types; i++) {
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
+ struct btf_field_iter it;
+ __u32 *type_id;
- r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
+ r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (r)
return r;
+
+ while ((type_id = btf_field_iter_next(&it))) {
+ __u32 resolved_id, new_id;
+
+ resolved_id = resolve_type_id(d, *type_id);
+ new_id = d->hypot_map[resolved_id];
+ if (new_id > BTF_MAX_NR_TYPES)
+ return -EINVAL;
+
+ *type_id = new_id;
+ }
}
if (!d->btf_ext)
@@ -5003,136 +5093,6 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt
return btf__parse_split(path, vmlinux_btf);
}
-int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
-{
- int i, n, err;
-
- switch (btf_kind(t)) {
- case BTF_KIND_INT:
- case BTF_KIND_FLOAT:
- case BTF_KIND_ENUM:
- case BTF_KIND_ENUM64:
- return 0;
-
- case BTF_KIND_FWD:
- case BTF_KIND_CONST:
- case BTF_KIND_VOLATILE:
- case BTF_KIND_RESTRICT:
- case BTF_KIND_PTR:
- case BTF_KIND_TYPEDEF:
- case BTF_KIND_FUNC:
- case BTF_KIND_VAR:
- case BTF_KIND_DECL_TAG:
- case BTF_KIND_TYPE_TAG:
- return visit(&t->type, ctx);
-
- case BTF_KIND_ARRAY: {
- struct btf_array *a = btf_array(t);
-
- err = visit(&a->type, ctx);
- err = err ?: visit(&a->index_type, ctx);
- return err;
- }
-
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION: {
- struct btf_member *m = btf_members(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->type, ctx);
- if (err)
- return err;
- }
- return 0;
- }
-
- case BTF_KIND_FUNC_PROTO: {
- struct btf_param *m = btf_params(t);
-
- err = visit(&t->type, ctx);
- if (err)
- return err;
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->type, ctx);
- if (err)
- return err;
- }
- return 0;
- }
-
- case BTF_KIND_DATASEC: {
- struct btf_var_secinfo *m = btf_var_secinfos(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->type, ctx);
- if (err)
- return err;
- }
- return 0;
- }
-
- default:
- return -EINVAL;
- }
-}
-
-int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
-{
- int i, n, err;
-
- err = visit(&t->name_off, ctx);
- if (err)
- return err;
-
- switch (btf_kind(t)) {
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION: {
- struct btf_member *m = btf_members(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->name_off, ctx);
- if (err)
- return err;
- }
- break;
- }
- case BTF_KIND_ENUM: {
- struct btf_enum *m = btf_enum(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->name_off, ctx);
- if (err)
- return err;
- }
- break;
- }
- case BTF_KIND_ENUM64: {
- struct btf_enum64 *m = btf_enum64(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->name_off, ctx);
- if (err)
- return err;
- }
- break;
- }
- case BTF_KIND_FUNC_PROTO: {
- struct btf_param *m = btf_params(t);
-
- for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
- err = visit(&m->name_off, ctx);
- if (err)
- return err;
- }
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
const struct btf_ext_info *seg;
@@ -5212,3 +5172,325 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
return 0;
}
+
+struct btf_distill {
+ struct btf_pipe pipe;
+ int *id_map;
+ unsigned int split_start_id;
+ unsigned int split_start_str;
+ int diff_id;
+};
+
+static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
+{
+ struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
+ struct btf_field_iter it;
+ __u32 *id;
+ int err;
+
+ err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+ while ((id = btf_field_iter_next(&it))) {
+ struct btf_type *base_t;
+
+ if (!*id)
+ continue;
+ /* split BTF id, not needed */
+ if (*id >= dist->split_start_id)
+ continue;
+ /* already added ? */
+ if (dist->id_map[*id] > 0)
+ continue;
+
+ /* only a subset of base BTF types should be referenced from
+ * split BTF; ensure nothing unexpected is referenced.
+ */
+ base_t = btf_type_by_id(dist->pipe.src, *id);
+ switch (btf_kind(base_t)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_FWD:
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ case BTF_KIND_PTR:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_FUNC_PROTO:
+ case BTF_KIND_TYPE_TAG:
+ dist->id_map[*id] = *id;
+ break;
+ default:
+ pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
+ *id, btf_kind(base_t));
+ return -EINVAL;
+ }
+ /* If a base type is used, ensure types it refers to are
+ * marked as used also; so for example if we find a PTR to INT
+ * we need both the PTR and INT.
+ *
+ * The only exception is named struct/unions, since distilled
+ * base BTF composite types have no members.
+ */
+ if (btf_is_composite(base_t) && base_t->name_off)
+ continue;
+ err = btf_add_distilled_type_ids(dist, *id);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int btf_add_distilled_types(struct btf_distill *dist)
+{
+ bool adding_to_base = dist->pipe.dst->start_id == 1;
+ int id = btf__type_cnt(dist->pipe.dst);
+ struct btf_type *t;
+ int i, err = 0;
+
+ /* Add types for each of the required references to either distilled
+ * base or split BTF, depending on type characteristics.
+ */
+ for (i = 1; i < dist->split_start_id; i++) {
+ const char *name;
+ int kind;
+
+ if (!dist->id_map[i])
+ continue;
+ t = btf_type_by_id(dist->pipe.src, i);
+ kind = btf_kind(t);
+ name = btf__name_by_offset(dist->pipe.src, t->name_off);
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_FWD:
+ /* Named int, float, fwd are added to base. */
+ if (!adding_to_base)
+ continue;
+ err = btf_add_type(&dist->pipe, t);
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ /* Named struct/union are added to base as 0-vlen
+ * struct/union of same size. Anonymous struct/unions
+ * are added to split BTF as-is.
+ */
+ if (adding_to_base) {
+ if (!t->name_off)
+ continue;
+ err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
+ } else {
+ if (t->name_off)
+ continue;
+ err = btf_add_type(&dist->pipe, t);
+ }
+ break;
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ /* Named enum[64]s are added to base as a sized
+ * enum; relocation will match with appropriately-named
+ * and sized enum or enum64.
+ *
+ * Anonymous enums are added to split BTF as-is.
+ */
+ if (adding_to_base) {
+ if (!t->name_off)
+ continue;
+ err = btf__add_enum(dist->pipe.dst, name, t->size);
+ } else {
+ if (t->name_off)
+ continue;
+ err = btf_add_type(&dist->pipe, t);
+ }
+ break;
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_PTR:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_FUNC_PROTO:
+ case BTF_KIND_TYPE_TAG:
+ /* All other types are added to split BTF. */
+ if (adding_to_base)
+ continue;
+ err = btf_add_type(&dist->pipe, t);
+ break;
+ default:
+ pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
+ name, i, kind);
+ return -EINVAL;
+
+ }
+ if (err < 0)
+ break;
+ dist->id_map[i] = id++;
+ }
+ return err;
+}
+
+/* Split BTF ids without a mapping will be shifted downwards since distilled
+ * base BTF is smaller than the original base BTF. For those that have a
+ * mapping (either to base or updated split BTF), update the id based on
+ * that mapping.
+ */
+static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
+{
+ struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
+ struct btf_field_iter it;
+ __u32 *id;
+ int err;
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+ while ((id = btf_field_iter_next(&it))) {
+ if (dist->id_map[*id])
+ *id = dist->id_map[*id];
+ else if (*id >= dist->split_start_id)
+ *id -= dist->diff_id;
+ }
+ return 0;
+}
+
+/* Create updated split BTF with distilled base BTF; distilled base BTF
+ * consists of BTF information required to clarify the types that split
+ * BTF refers to, omitting unneeded details. Specifically it will contain
+ * base types and memberless definitions of named structs, unions and enumerated
+ * types. Associated reference types like pointers, arrays and anonymous
+ * structs, unions and enumerated types will be added to split BTF.
+ * Size is recorded for named struct/unions to help guide matching to the
+ * target base BTF during later relocation.
+ *
+ * The only case where structs, unions or enumerated types are fully represented
+ * is when they are anonymous; in such cases, the anonymous type is added to
+ * split BTF in full.
+ *
+ * We return newly-created split BTF where the split BTF refers to a newly-created
+ * distilled base BTF. Both must be freed separately by the caller.
+ */
+int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
+ struct btf **new_split_btf)
+{
+ struct btf *new_base = NULL, *new_split = NULL;
+ const struct btf *old_base;
+ unsigned int n = btf__type_cnt(src_btf);
+ struct btf_distill dist = {};
+ struct btf_type *t;
+ int i, err = 0;
+
+ /* src BTF must be split BTF. */
+ old_base = btf__base_btf(src_btf);
+ if (!new_base_btf || !new_split_btf || !old_base)
+ return libbpf_err(-EINVAL);
+
+ new_base = btf__new_empty();
+ if (!new_base)
+ return libbpf_err(-ENOMEM);
+ dist.id_map = calloc(n, sizeof(*dist.id_map));
+ if (!dist.id_map) {
+ err = -ENOMEM;
+ goto done;
+ }
+ dist.pipe.src = src_btf;
+ dist.pipe.dst = new_base;
+ dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
+ if (IS_ERR(dist.pipe.str_off_map)) {
+ err = -ENOMEM;
+ goto done;
+ }
+ dist.split_start_id = btf__type_cnt(old_base);
+ dist.split_start_str = old_base->hdr->str_len;
+
+ /* Pass over src split BTF; generate the list of base BTF type ids it
+ * references; these will constitute our distilled BTF set to be
+ * distributed over base and split BTF as appropriate.
+ */
+ for (i = src_btf->start_id; i < n; i++) {
+ err = btf_add_distilled_type_ids(&dist, i);
+ if (err < 0)
+ goto done;
+ }
+ /* Next add types for each of the required references to base BTF and split BTF
+ * in turn.
+ */
+ err = btf_add_distilled_types(&dist);
+ if (err < 0)
+ goto done;
+
+ /* Create new split BTF with distilled base BTF as its base; the final
+ * state is split BTF with distilled base BTF that represents enough
+ * about its base references to allow it to be relocated with the base
+ * BTF available.
+ */
+ new_split = btf__new_empty_split(new_base);
+ if (!new_split) {
+ err = -errno;
+ goto done;
+ }
+ dist.pipe.dst = new_split;
+ /* First add all split types */
+ for (i = src_btf->start_id; i < n; i++) {
+ t = btf_type_by_id(src_btf, i);
+ err = btf_add_type(&dist.pipe, t);
+ if (err < 0)
+ goto done;
+ }
+ /* Now add distilled types to split BTF that are not added to base. */
+ err = btf_add_distilled_types(&dist);
+ if (err < 0)
+ goto done;
+
+ /* All split BTF ids will be shifted downwards since there are fewer base
+ * BTF ids in distilled base BTF.
+ */
+ dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);
+
+ n = btf__type_cnt(new_split);
+ /* Now update base/split BTF ids. */
+ for (i = 1; i < n; i++) {
+ err = btf_update_distilled_type_ids(&dist, i);
+ if (err < 0)
+ break;
+ }
+done:
+ free(dist.id_map);
+ hashmap__free(dist.pipe.str_off_map);
+ if (err) {
+ btf__free(new_split);
+ btf__free(new_base);
+ return libbpf_err(err);
+ }
+ *new_base_btf = new_base;
+ *new_split_btf = new_split;
+
+ return 0;
+}
+
+const struct btf_header *btf_header(const struct btf *btf)
+{
+ return btf->hdr;
+}
+
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
+{
+ btf->base_btf = (struct btf *)base_btf;
+ btf->start_id = btf__type_cnt(base_btf);
+ btf->start_str_off = base_btf->hdr->str_len;
+}
+
+int btf__relocate(struct btf *btf, const struct btf *base_btf)
+{
+ int err = btf_relocate(btf, base_btf, NULL);
+
+ if (!err)
+ btf->owns_base = false;
+ return libbpf_err(err);
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 8e6880d91c84..b68d216837a9 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -18,6 +18,7 @@ extern "C" {
#define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext"
+#define BTF_BASE_ELF_SEC ".BTF.base"
#define MAPS_ELF_SEC ".maps"
struct btf;
@@ -107,6 +108,27 @@ LIBBPF_API struct btf *btf__new_empty(void);
*/
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
+/**
+ * @brief **btf__distill_base()** creates new versions of the split BTF
+ * *src_btf* and its base BTF. The new base BTF will only contain the types
+ * needed to improve robustness of the split BTF to small changes in base BTF.
+ * When that split BTF is loaded against a (possibly changed) base, this
+ * distilled base BTF will help update references to that (possibly changed)
+ * base BTF.
+ *
+ * Both the new split and its associated new base BTF must be freed by
+ * the caller.
+ *
+ * If successful, 0 is returned and **new_base_btf** and **new_split_btf**
+ * will point at new base/split BTF. Both the new split and its associated
+ * new base BTF must be freed by the caller.
+ *
+ * A negative value is returned on error and the thread-local `errno` variable
+ * is set to the error code as well.
+ */
+LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
+ struct btf **new_split_btf);
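A minimal usage sketch (not part of this patch); the .btf file names are illustrative and error-path cleanup is elided:

#include <stdio.h>
#include <bpf/btf.h>

static int distill_module_btf(void)
{
	struct btf *vmlinux_btf, *mod_btf, *new_base = NULL, *new_split = NULL;
	int err;

	vmlinux_btf = btf__parse("vmlinux.btf", NULL);		/* illustrative path */
	if (!vmlinux_btf)
		return -1;
	mod_btf = btf__parse_split("module.btf", vmlinux_btf);	/* split BTF on top of it */
	if (!mod_btf)
		return -1;
	err = btf__distill_base(mod_btf, &new_base, &new_split);
	if (err)
		return err;
	/* new_split is re-parented onto new_base; both must be freed separately. */
	btf__free(new_split);
	btf__free(new_base);
	btf__free(mod_btf);
	btf__free(vmlinux_btf);
	return 0;
}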
+
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
@@ -231,6 +253,20 @@ struct btf_dedup_opts {
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
+/**
+ * @brief **btf__relocate()** will check the split BTF *btf* for references
+ * to base BTF kinds, and verify those references are compatible with
+ * *base_btf*; if they are, *btf* is adjusted such that it is re-parented to
+ * *base_btf* and type ids and strings are adjusted to accommodate this.
+ *
+ * If successful, 0 is returned and **btf** now has **base_btf** as its
+ * base.
+ *
+ * A negative value is returned on error and the thread-local `errno` variable
+ * is set to the error code as well.
+ */
+LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
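And the counterpart at load time (again a sketch, not part of this patch): re-parenting split BTF that carries a distilled base onto the running kernel's vmlinux BTF, obtained here via btf__load_vmlinux_btf():

#include <bpf/btf.h>

static int relocate_to_running_kernel(struct btf *split_btf)
{
	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
	int err;

	if (!vmlinux_btf)
		return -1;
	err = btf__relocate(split_btf, vmlinux_btf);
	/* On success, split_btf's type ids/strings now reference vmlinux_btf. */
	return err;
}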
+
struct btf_dump;
struct btf_dump_opts {
diff --git a/tools/lib/bpf/btf_iter.c b/tools/lib/bpf/btf_iter.c
new file mode 100644
index 000000000000..9a6c822c2294
--- /dev/null
+++ b/tools/lib/bpf/btf_iter.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#ifdef __KERNEL__
+#include <linux/bpf.h>
+#include <linux/btf.h>
+
+#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t)
+
+#else
+#include "btf.h"
+#include "libbpf_internal.h"
+#endif
+
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind)
+{
+ it->p = NULL;
+ it->m_idx = -1;
+ it->off_idx = 0;
+ it->vlen = 0;
+
+ switch (iter_kind) {
+ case BTF_FIELD_ITER_IDS:
+ switch (btf_kind(t)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ it->desc = (struct btf_field_desc) {};
+ break;
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
+ it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
+ break;
+ case BTF_KIND_ARRAY:
+ it->desc = (struct btf_field_desc) {
+ 2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
+ sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
+ };
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ it->desc = (struct btf_field_desc) {
+ 0, {},
+ sizeof(struct btf_member),
+ 1, {offsetof(struct btf_member, type)}
+ };
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, type)},
+ sizeof(struct btf_param),
+ 1, {offsetof(struct btf_param, type)}
+ };
+ break;
+ case BTF_KIND_DATASEC:
+ it->desc = (struct btf_field_desc) {
+ 0, {},
+ sizeof(struct btf_var_secinfo),
+ 1, {offsetof(struct btf_var_secinfo, type)}
+ };
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case BTF_FIELD_ITER_STRS:
+ switch (btf_kind(t)) {
+ case BTF_KIND_UNKN:
+ it->desc = (struct btf_field_desc) {};
+ break;
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_FWD:
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
+ case BTF_KIND_DATASEC:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)}
+ };
+ break;
+ case BTF_KIND_ENUM:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)},
+ sizeof(struct btf_enum),
+ 1, {offsetof(struct btf_enum, name_off)}
+ };
+ break;
+ case BTF_KIND_ENUM64:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)},
+ sizeof(struct btf_enum64),
+ 1, {offsetof(struct btf_enum64, name_off)}
+ };
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)},
+ sizeof(struct btf_member),
+ 1, {offsetof(struct btf_member, name_off)}
+ };
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ it->desc = (struct btf_field_desc) {
+ 1, {offsetof(struct btf_type, name_off)},
+ sizeof(struct btf_param),
+ 1, {offsetof(struct btf_param, name_off)}
+ };
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (it->desc.m_sz)
+ it->vlen = btf_vlen(t);
+
+ it->p = t;
+ return 0;
+}
+
+__u32 *btf_field_iter_next(struct btf_field_iter *it)
+{
+ if (!it->p)
+ return NULL;
+
+ if (it->m_idx < 0) {
+ if (it->off_idx < it->desc.t_off_cnt)
+ return it->p + it->desc.t_offs[it->off_idx++];
+ /* move to per-member iteration */
+ it->m_idx = 0;
+ it->p += sizeof(struct btf_type);
+ it->off_idx = 0;
+ }
+
+ /* if type doesn't have members, stop */
+ if (it->desc.m_sz == 0) {
+ it->p = NULL;
+ return NULL;
+ }
+
+ if (it->off_idx >= it->desc.m_off_cnt) {
+ /* exhausted this member's fields, go to the next member */
+ it->m_idx++;
+ it->p += it->desc.m_sz;
+ it->off_idx = 0;
+ }
+
+ if (it->m_idx < it->vlen)
+ return it->p + it->desc.m_offs[it->off_idx++];
+
+ it->p = NULL;
+ return NULL;
+}
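For context (not part of this patch), a sketch of the iteration pattern these helpers enable; dump_type_strs() is a hypothetical caller and assumes the declarations from libbpf_internal.h plus btf__str_by_offset() from the public API:

#include <stdio.h>

/* Hypothetical caller: print every string referenced by one BTF type. */
static void dump_type_strs(const struct btf *btf, struct btf_type *t)
{
	struct btf_field_iter it;
	__u32 *str_off;

	if (btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS))
		return;
	while ((str_off = btf_field_iter_next(&it)))
		printf("%s\n", btf__str_by_offset(btf, *str_off));
}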
diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
new file mode 100644
index 000000000000..17f8b32f94a0
--- /dev/null
+++ b/tools/lib/bpf/btf_relocate.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#ifdef __KERNEL__
+#include <linux/bpf.h>
+#include <linux/bsearch.h>
+#include <linux/btf.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+#include <linux/bpf_verifier.h>
+
+#define btf_type_by_id (struct btf_type *)btf_type_by_id
+#define btf__type_cnt btf_nr_types
+#define btf__base_btf btf_base_btf
+#define btf__name_by_offset btf_name_by_offset
+#define btf__str_by_offset btf_str_by_offset
+#define btf_kflag btf_type_kflag
+
+#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN)
+#define free(ptr) kvfree(ptr)
+#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL)
+
+#else
+
+#include "btf.h"
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+
+#endif /* __KERNEL__ */
+
+struct btf;
+
+struct btf_relocate {
+ struct btf *btf;
+ const struct btf *base_btf;
+ const struct btf *dist_base_btf;
+ unsigned int nr_base_types;
+ unsigned int nr_split_types;
+ unsigned int nr_dist_base_types;
+ int dist_str_len;
+ int base_str_len;
+ __u32 *id_map;
+ __u32 *str_map;
+};
+
+/* Set temporarily in relocation id_map if distilled base struct/union is
+ * embedded in a split BTF struct/union; in such a case, size information must
+ * match between distilled base BTF and base BTF representation of type.
+ */
+#define BTF_IS_EMBEDDED ((__u32)-1)
+
+/* <name, size, id> triple used in sorting/searching distilled base BTF. */
+struct btf_name_info {
+ const char *name;
+ /* set when search requires a size match */
+ bool needs_size: 1;
+ unsigned int size: 31;
+ __u32 id;
+};
+
+static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i)
+{
+ struct btf_type *t = btf_type_by_id(r->btf, i);
+ struct btf_field_iter it;
+ __u32 *id;
+ int err;
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+
+ while ((id = btf_field_iter_next(&it)))
+ *id = r->id_map[*id];
+ return 0;
+}
+
+/* Simple string comparison used for sorting within BTF, since all distilled
+ * types are named. If strings match, and size is non-zero for both elements
+ * fall back to using size for ordering.
+ */
+static int cmp_btf_name_size(const void *n1, const void *n2)
+{
+ const struct btf_name_info *ni1 = n1;
+ const struct btf_name_info *ni2 = n2;
+ int name_diff = strcmp(ni1->name, ni2->name);
+
+ if (!name_diff && ni1->needs_size && ni2->needs_size)
+ return ni2->size - ni1->size;
+ return name_diff;
+}
+
+/* Binary search with a small twist; find leftmost element that matches
+ * so that we can then iterate through all exact matches. So for example
+ * searching { "a", "bb", "bb", "c" } we would always match on the
+ * leftmost "bb".
+ */
+static struct btf_name_info *search_btf_name_size(struct btf_name_info *key,
+ struct btf_name_info *vals,
+ int nelems)
+{
+ struct btf_name_info *ret = NULL;
+ int high = nelems - 1;
+ int low = 0;
+
+ while (low <= high) {
+ int mid = (low + high)/2;
+ struct btf_name_info *val = &vals[mid];
+ int diff = cmp_btf_name_size(key, val);
+
+ if (diff == 0)
+ ret = val;
+ /* even if found, keep searching for leftmost match */
+ if (diff <= 0)
+ high = mid - 1;
+ else
+ low = mid + 1;
+ }
+ return ret;
+}
+
+/* If a member of a split BTF struct/union refers to a base BTF
+ * struct/union, mark that struct/union id temporarily in the id_map
+ * with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef
+ * reference types, but if a pointer is encountered, the type is no longer
+ * considered embedded.
+ */
+static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i)
+{
+ struct btf_type *t = btf_type_by_id(r->btf, i);
+ struct btf_field_iter it;
+ __u32 *id;
+ int err;
+
+ if (!btf_is_composite(t))
+ return 0;
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+
+ while ((id = btf_field_iter_next(&it))) {
+ __u32 next_id = *id;
+
+ while (next_id) {
+ t = btf_type_by_id(r->btf, next_id);
+ switch (btf_kind(t)) {
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_TYPE_TAG:
+ next_id = t->type;
+ break;
+ case BTF_KIND_ARRAY: {
+ struct btf_array *a = btf_array(t);
+
+ next_id = a->type;
+ break;
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ if (next_id < r->nr_dist_base_types)
+ r->id_map[next_id] = BTF_IS_EMBEDDED;
+ next_id = 0;
+ break;
+ default:
+ next_id = 0;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate
+ * through base BTF looking up distilled type (using binary search) equivalents.
+ */
+static int btf_relocate_map_distilled_base(struct btf_relocate *r)
+{
+ struct btf_name_info *info, *info_end;
+ struct btf_type *base_t, *dist_t;
+ __u8 *base_name_cnt = NULL;
+ int err = 0;
+ __u32 id;
+
+ /* generate a sort index array of name/type ids sorted by name for
+ * distilled base BTF to speed name-based lookups.
+ */
+ info = calloc(r->nr_dist_base_types, sizeof(*info));
+ if (!info) {
+ err = -ENOMEM;
+ goto done;
+ }
+ info_end = info + r->nr_dist_base_types;
+ for (id = 0; id < r->nr_dist_base_types; id++) {
+ dist_t = btf_type_by_id(r->dist_base_btf, id);
+ info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
+ info[id].id = id;
+ info[id].size = dist_t->size;
+ info[id].needs_size = true;
+ }
+ qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size);
+
+ /* Mark distilled base struct/union members of split BTF structs/unions
+ * in id_map with BTF_IS_EMBEDDED; this signals that these types
+ * need to match both name and size, otherwise embedding the base
+ * struct/union in the split type is invalid.
+ */
+	for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) {
+ err = btf_mark_embedded_composite_type_ids(r, id);
+ if (err)
+ goto done;
+ }
+
+ /* Collect name counts for composite types in base BTF. If multiple
+ * instances of a struct/union of the same name exist, we need to use
+ * size to determine which to map to since name alone is ambiguous.
+ */
+ base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt));
+ if (!base_name_cnt) {
+ err = -ENOMEM;
+ goto done;
+ }
+ for (id = 1; id < r->nr_base_types; id++) {
+ base_t = btf_type_by_id(r->base_btf, id);
+ if (!btf_is_composite(base_t) || !base_t->name_off)
+ continue;
+ if (base_name_cnt[base_t->name_off] < 255)
+ base_name_cnt[base_t->name_off]++;
+ }
+
+ /* Now search base BTF for matching distilled base BTF types. */
+ for (id = 1; id < r->nr_base_types; id++) {
+ struct btf_name_info *dist_info, base_info = {};
+ int dist_kind, base_kind;
+
+ base_t = btf_type_by_id(r->base_btf, id);
+ /* distilled base consists of named types only. */
+ if (!base_t->name_off)
+ continue;
+ base_kind = btf_kind(base_t);
+ base_info.id = id;
+ base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off);
+ switch (base_kind) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ /* These types should match both name and size */
+ base_info.needs_size = true;
+ base_info.size = base_t->size;
+ break;
+ case BTF_KIND_FWD:
+ /* No size considerations for fwds. */
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ /* Size only needs to be used for struct/union if there
+ * are multiple types in base BTF with the same name.
+ * If there are multiple _distilled_ types with the same
+ * name (a very unlikely scenario), that doesn't matter
+ * unless corresponding _base_ types to match them are
+ * missing.
+ */
+ base_info.needs_size = base_name_cnt[base_t->name_off] > 1;
+ base_info.size = base_t->size;
+ break;
+ default:
+ continue;
+ }
+ /* iterate over all matching distilled base types */
+ for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types);
+ dist_info != NULL && dist_info < info_end &&
+ cmp_btf_name_size(&base_info, dist_info) == 0;
+ dist_info++) {
+ if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) {
+ pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n",
+ id, dist_info->id);
+ err = -EINVAL;
+ goto done;
+ }
+ dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id);
+ dist_kind = btf_kind(dist_t);
+
+ /* Validate that the found distilled type is compatible.
+ * Do not error out on mismatch as another match may
+ * occur for an identically-named type.
+ */
+ switch (dist_kind) {
+ case BTF_KIND_FWD:
+ switch (base_kind) {
+ case BTF_KIND_FWD:
+ if (btf_kflag(dist_t) != btf_kflag(base_t))
+ continue;
+ break;
+ case BTF_KIND_STRUCT:
+ if (btf_kflag(base_t))
+ continue;
+ break;
+ case BTF_KIND_UNION:
+ if (!btf_kflag(base_t))
+ continue;
+ break;
+ default:
+ continue;
+ }
+ break;
+ case BTF_KIND_INT:
+ if (dist_kind != base_kind ||
+ btf_int_encoding(base_t) != btf_int_encoding(dist_t))
+ continue;
+ break;
+ case BTF_KIND_FLOAT:
+ if (dist_kind != base_kind)
+ continue;
+ break;
+ case BTF_KIND_ENUM:
+ /* ENUM and ENUM64 are encoded as sized ENUM in
+ * distilled base BTF.
+ */
+ if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64)
+ continue;
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ /* size verification is required for embedded
+ * struct/unions.
+ */
+ if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED &&
+ base_t->size != dist_t->size)
+ continue;
+ break;
+ default:
+ continue;
+ }
+ if (r->id_map[dist_info->id] &&
+ r->id_map[dist_info->id] != BTF_IS_EMBEDDED) {
+ /* we already have a match; this tells us that
+ * multiple base types of the same name
+ * have the same size, since for cases where
+ * multiple types have the same name we match
+ * on name and size. In this case, we have
+ * no way of determining which to relocate
+ * to in base BTF, so error out.
+ */
+ pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n",
+ base_info.name, dist_info->id,
+ base_t->size, id, r->id_map[dist_info->id]);
+ err = -EINVAL;
+ goto done;
+ }
+ /* map id and name */
+ r->id_map[dist_info->id] = id;
+ r->str_map[dist_t->name_off] = base_t->name_off;
+ }
+ }
+ /* ensure all distilled BTF ids now have a mapping... */
+ for (id = 1; id < r->nr_dist_base_types; id++) {
+ const char *name;
+
+ if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED)
+ continue;
+ dist_t = btf_type_by_id(r->dist_base_btf, id);
+ name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
+ pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n",
+ name, id);
+ err = -EINVAL;
+ break;
+ }
+done:
+ free(base_name_cnt);
+ free(info);
+ return err;
+}
+
+/* distilled base should only have named int/float/enum/fwd/struct/union types. */
+static int btf_relocate_validate_distilled_base(struct btf_relocate *r)
+{
+ unsigned int i;
+
+ for (i = 1; i < r->nr_dist_base_types; i++) {
+ struct btf_type *t = btf_type_by_id(r->dist_base_btf, i);
+ int kind = btf_kind(t);
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_FWD:
+ if (t->name_off)
+ break;
+ pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n",
+ i, kind);
+ return -EINVAL;
+ default:
+			pr_warn("type [%d] in distilled base BTF has unexpected kind [%d]\n",
+ i, kind);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
+{
+ struct btf_type *t = btf_type_by_id(r->btf, i);
+ struct btf_field_iter it;
+ __u32 *str_off;
+ int off, err;
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
+ if (err)
+ return err;
+
+ while ((str_off = btf_field_iter_next(&it))) {
+ if (!*str_off)
+ continue;
+ if (*str_off >= r->dist_str_len) {
+ *str_off += r->base_str_len - r->dist_str_len;
+ } else {
+ off = r->str_map[*str_off];
+ if (!off) {
+			pr_warn("string '%s' [offset %u] is not mapped to base BTF\n",
+				btf__str_by_offset(r->btf, *str_off), *str_off);
+ return -ENOENT;
+ }
+ *str_off = off;
+ }
+ }
+ return 0;
+}
+
+/* If successful, the output of relocation is the updated BTF, with its base
+ * BTF pointing at base_btf and with type ids and string offsets adjusted
+ * accordingly.
+ */
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map)
+{
+ unsigned int nr_types = btf__type_cnt(btf);
+ const struct btf_header *dist_base_hdr;
+ const struct btf_header *base_hdr;
+ struct btf_relocate r = {};
+ int err = 0;
+ __u32 id, i;
+
+ r.dist_base_btf = btf__base_btf(btf);
+ if (!base_btf || r.dist_base_btf == base_btf)
+ return -EINVAL;
+
+ r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf);
+ r.nr_base_types = btf__type_cnt(base_btf);
+ r.nr_split_types = nr_types - r.nr_dist_base_types;
+ r.btf = btf;
+ r.base_btf = base_btf;
+
+ r.id_map = calloc(nr_types, sizeof(*r.id_map));
+ r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map));
+ dist_base_hdr = btf_header(r.dist_base_btf);
+ base_hdr = btf_header(r.base_btf);
+ r.dist_str_len = dist_base_hdr->str_len;
+ r.base_str_len = base_hdr->str_len;
+ if (!r.id_map || !r.str_map) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = btf_relocate_validate_distilled_base(&r);
+ if (err)
+ goto err_out;
+
+ /* Split BTF ids need to be adjusted as base and distilled base
+ * have different numbers of types, changing the start id of split
+ * BTF.
+ */
+ for (id = r.nr_dist_base_types; id < nr_types; id++)
+ r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types;
+
+ /* Build a map from distilled base ids to actual base BTF ids; it is used
+ * to update split BTF id references. Also build a str_map mapping from
+ * distilled base BTF names to base BTF names.
+ */
+ err = btf_relocate_map_distilled_base(&r);
+ if (err)
+ goto err_out;
+
+ /* Next, rewrite type ids in split BTF, replacing split ids with updated
+ * ids based on number of types in base BTF, and base ids with
+ * relocated ids from base_btf.
+ */
+ for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) {
+ err = btf_relocate_rewrite_type_id(&r, id);
+ if (err)
+ goto err_out;
+ }
+ /* String offsets now need to be updated using the str_map. */
+ for (i = 0; i < r.nr_split_types; i++) {
+ err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types);
+ if (err)
+ goto err_out;
+ }
+ /* Finally reset base BTF to be base_btf */
+ btf_set_base_btf(btf, base_btf);
+
+ if (id_map) {
+ *id_map = r.id_map;
+ r.id_map = NULL;
+ }
+err_out:
+ free(r.id_map);
+ free(r.str_map);
+ return err;
+}
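For orientation, here is a minimal userspace sketch of how the relocation path above might be exercised through the public wrappers exported later in this series (btf__distill_base()/btf__relocate() in libbpf.map below). The file paths and the helper name are illustrative assumptions, not part of the patch:

	/* Assumed sketch: relocate split BTF that was built against a distilled
	 * base so that it refers to the full vmlinux BTF of the running kernel.
	 */
	#include <bpf/btf.h>

	static int relocate_against_vmlinux(const char *dist_base_path,
					    const char *split_btf_path)
	{
		struct btf *dist_base = NULL, *split = NULL, *vmlinux = NULL;
		int err = -1;

		dist_base = btf__parse(dist_base_path, NULL);
		split = dist_base ? btf__parse_split(split_btf_path, dist_base) : NULL;
		vmlinux = btf__load_vmlinux_btf();	/* full base BTF */
		if (!dist_base || !split || !vmlinux)
			goto out;

		/* rewrites type ids and string offsets in 'split' and re-points
		 * its base BTF from 'dist_base' to 'vmlinux'
		 */
		err = btf__relocate(split, vmlinux);
	out:
		btf__free(split);
		btf__free(dist_base);
		btf__free(vmlinux);
		return err;
	}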
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5401f2df463d..a3be6f8fac09 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -229,7 +229,30 @@ static const char * const prog_type_name[] = {
static int __base_pr(enum libbpf_print_level level, const char *format,
va_list args)
{
- if (level == LIBBPF_DEBUG)
+ const char *env_var = "LIBBPF_LOG_LEVEL";
+ static enum libbpf_print_level min_level = LIBBPF_INFO;
+ static bool initialized;
+
+ if (!initialized) {
+ char *verbosity;
+
+ initialized = true;
+ verbosity = getenv(env_var);
+ if (verbosity) {
+ if (strcasecmp(verbosity, "warn") == 0)
+ min_level = LIBBPF_WARN;
+ else if (strcasecmp(verbosity, "debug") == 0)
+ min_level = LIBBPF_DEBUG;
+ else if (strcasecmp(verbosity, "info") == 0)
+ min_level = LIBBPF_INFO;
+ else
+ fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n",
+ env_var, verbosity);
+ }
+ }
+
+ /* if too verbose, skip logging */
+ if (level > min_level)
return 0;
return vfprintf(stderr, format, args);
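For comparison, the same verbosity can be selected programmatically rather than by running with LIBBPF_LOG_LEVEL=debug in the environment, using the existing libbpf_set_print() hook; a minimal sketch:

	#include <stdarg.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>

	/* Forward every message, including LIBBPF_DEBUG ones, to stderr. */
	static int print_all(enum libbpf_print_level level,
			     const char *format, va_list args)
	{
		return vfprintf(stderr, format, args);
	}

	/* somewhere early in main(): libbpf_set_print(print_all); */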
@@ -549,6 +572,7 @@ struct bpf_map {
bool pinned;
bool reused;
bool autocreate;
+ bool autoattach;
__u64 map_extra;
};
@@ -1377,6 +1401,7 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
map->def.value_size = type->size;
map->def.max_entries = 1;
map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
+ map->autoattach = true;
map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
@@ -4796,6 +4821,20 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
return 0;
}
+int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach)
+{
+ if (!bpf_map__is_struct_ops(map))
+ return libbpf_err(-EINVAL);
+
+ map->autoattach = autoattach;
+ return 0;
+}
+
+bool bpf_map__autoattach(const struct bpf_map *map)
+{
+ return map->autoattach;
+}
+
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
struct bpf_map_info info;
@@ -10336,7 +10375,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
- if (prev == NULL)
+ if (prev == NULL && obj != NULL)
return obj->maps;
return __bpf_map__iter(prev, obj, 1);
@@ -10345,7 +10384,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
- if (next == NULL) {
+ if (next == NULL && obj != NULL) {
if (!obj->nr_maps)
return NULL;
return obj->maps + obj->nr_maps - 1;
@@ -12877,8 +12916,10 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
__u32 zero = 0;
int err, fd;
- if (!bpf_map__is_struct_ops(map))
+ if (!bpf_map__is_struct_ops(map)) {
+ pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
return libbpf_err_ptr(-EINVAL);
+ }
if (map->fd < 0) {
pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
@@ -13671,14 +13712,15 @@ int libbpf_num_possible_cpus(void)
static int populate_skeleton_maps(const struct bpf_object *obj,
struct bpf_map_skeleton *maps,
- size_t map_cnt)
+ size_t map_cnt, size_t map_skel_sz)
{
int i;
for (i = 0; i < map_cnt; i++) {
- struct bpf_map **map = maps[i].map;
- const char *name = maps[i].name;
- void **mmaped = maps[i].mmaped;
+ struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
+ struct bpf_map **map = map_skel->map;
+ const char *name = map_skel->name;
+ void **mmaped = map_skel->mmaped;
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
@@ -13695,13 +13737,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj,
static int populate_skeleton_progs(const struct bpf_object *obj,
struct bpf_prog_skeleton *progs,
- size_t prog_cnt)
+ size_t prog_cnt, size_t prog_skel_sz)
{
int i;
for (i = 0; i < prog_cnt; i++) {
- struct bpf_program **prog = progs[i].prog;
- const char *name = progs[i].name;
+ struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
+ struct bpf_program **prog = prog_skel->prog;
+ const char *name = prog_skel->name;
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
@@ -13742,13 +13785,13 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
}
*s->obj = obj;
- err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
+ err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
- err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
+ err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
return libbpf_err(err);
@@ -13778,20 +13821,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
return libbpf_err(-errno);
}
- err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
+ err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
- err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
+ err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
- var_skel = &s->vars[var_idx];
+ var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
map = *var_skel->map;
map_type_id = bpf_map__btf_value_type_id(map);
map_type = btf__type_by_id(btf, map_type_id);
@@ -13838,10 +13881,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
}
for (i = 0; i < s->map_cnt; i++) {
- struct bpf_map *map = *s->maps[i].map;
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_map *map = *map_skel->map;
size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
- void **mmaped = s->maps[i].mmaped;
+ void **mmaped = map_skel->mmaped;
if (!mmaped)
continue;
@@ -13889,8 +13933,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
int i, err;
for (i = 0; i < s->prog_cnt; i++) {
- struct bpf_program *prog = *s->progs[i].prog;
- struct bpf_link **link = s->progs[i].link;
+ struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
+ struct bpf_program *prog = *prog_skel->prog;
+ struct bpf_link **link = prog_skel->link;
if (!prog->autoload || !prog->autoattach)
continue;
@@ -13922,6 +13967,38 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
*/
}
+
+ for (i = 0; i < s->map_cnt; i++) {
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_map *map = *map_skel->map;
+ struct bpf_link **link;
+
+ if (!map->autocreate || !map->autoattach)
+ continue;
+
+ /* only struct_ops maps can be attached */
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ /* skeleton is created with earlier version of bpftool, notify user */
+ if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
+ pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
+ bpf_map__name(map));
+ continue;
+ }
+
+ link = map_skel->link;
+ if (*link)
+ continue;
+
+ *link = bpf_map__attach_struct_ops(map);
+ if (!*link) {
+ err = -errno;
+ pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err);
+ return libbpf_err(err);
+ }
+ }
+
return 0;
}
@@ -13930,11 +14007,25 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
int i;
for (i = 0; i < s->prog_cnt; i++) {
- struct bpf_link **link = s->progs[i].link;
+ struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
+ struct bpf_link **link = prog_skel->link;
bpf_link__destroy(*link);
*link = NULL;
}
+
+ if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
+ return;
+
+ for (i = 0; i < s->map_cnt; i++) {
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_link **link = map_skel->link;
+
+ if (link) {
+ bpf_link__destroy(*link);
+ *link = NULL;
+ }
+ }
}
void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
@@ -13942,8 +14033,7 @@ void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
if (!s)
return;
- if (s->progs)
- bpf_object__detach_skeleton(s);
+ bpf_object__detach_skeleton(s);
if (s->obj)
bpf_object__close(*s->obj);
free(s->maps);
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c3f77d9260fe..64a6a3d323e3 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -98,7 +98,10 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
/**
* @brief **libbpf_set_print()** sets user-provided log callback function to
- * be used for libbpf warnings and informational messages.
+ * be used for libbpf warnings and informational messages. If the user callback
+ * is not set, messages are logged to stderr by default. The verbosity of these
+ * messages can be controlled by setting the environment variable
+ * LIBBPF_LOG_LEVEL to either warn, info, or debug.
* @param fn The log print function. If NULL, libbpf won't print anything.
* @return Pointer to old print function.
*
@@ -976,6 +979,23 @@ LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
/**
+ * @brief **bpf_map__set_autoattach()** sets whether libbpf should auto-attach
+ * the map during the BPF skeleton attach phase.
+ * @param map the BPF map instance
+ * @param autoattach whether to auto-attach the map during the BPF skeleton attach phase
+ * @return 0 on success; negative error code, otherwise
+ */
+LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);
+
+/**
+ * @brief **bpf_map__autoattach()** returns whether BPF map is configured to
+ * auto-attach during BPF skeleton attach phase.
+ * @param map the BPF map instance
+ * @return true if map is set to auto-attach during skeleton attach phase; false, otherwise
+ */
+LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);
+
+/**
* @brief **bpf_map__fd()** gets the file descriptor of the passed
* BPF map
* @param map the BPF map instance
@@ -1669,6 +1689,7 @@ struct bpf_map_skeleton {
const char *name;
struct bpf_map **map;
void **mmaped;
+ struct bpf_link **link;
};
struct bpf_prog_skeleton {
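A sketch of how the new auto-attach knob might be used from application code; the skeleton type my_skel and the struct_ops map my_ops are hypothetical names, assuming a skeleton generated by a bpftool new enough to emit the link member added above:

	static int attach_example(void)
	{
		struct my_skel *skel;
		struct bpf_link *link;

		skel = my_skel__open_and_load();
		if (!skel)
			return -1;

		/* skip this map during my_skel__attach() */
		bpf_map__set_autoattach(skel->maps.my_ops, false);

		if (my_skel__attach(skel))
			goto err;

		/* ...and attach the struct_ops map explicitly when ready */
		link = bpf_map__attach_struct_ops(skel->maps.my_ops);
		if (!link)
			goto err;

		return 0;
	err:
		my_skel__destroy(skel);
		return -1;
	}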
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index c1ce8aa3520b..8f0d9ea3b1b4 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -419,6 +419,10 @@ LIBBPF_1.4.0 {
LIBBPF_1.5.0 {
global:
+ btf__distill_base;
+ btf__relocate;
+ bpf_map__autoattach;
+ bpf_map__set_autoattach;
bpf_program__attach_sockmap;
ring__consume_n;
ring_buffer__consume_n;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index a0dcfb82e455..408df59e0771 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -234,6 +234,9 @@ struct btf_type;
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
const char *btf_kind_str(const struct btf_type *t);
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
+const struct btf_header *btf_header(const struct btf *btf);
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);
static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
{
@@ -508,11 +511,33 @@ struct bpf_line_info_min {
__u32 line_col;
};
+enum btf_field_iter_kind {
+ BTF_FIELD_ITER_IDS,
+ BTF_FIELD_ITER_STRS,
+};
+
+struct btf_field_desc {
+ /* once-per-type offsets */
+ int t_off_cnt, t_offs[2];
+ /* member struct size, or zero, if no members */
+ int m_sz;
+ /* repeated per-member offsets */
+ int m_off_cnt, m_offs[1];
+};
+
+struct btf_field_iter {
+ struct btf_field_desc desc;
+ void *p;
+ int m_idx;
+ int off_idx;
+ int vlen;
+};
+
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
+__u32 *btf_field_iter_next(struct btf_field_iter *it);
typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
-int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
-int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
@@ -597,13 +622,9 @@ static inline int ensure_good_fd(int fd)
return fd;
}
-static inline int sys_dup2(int oldfd, int newfd)
+static inline int sys_dup3(int oldfd, int newfd, int flags)
{
-#ifdef __NR_dup2
- return syscall(__NR_dup2, oldfd, newfd);
-#else
- return syscall(__NR_dup3, oldfd, newfd, 0);
-#endif
+ return syscall(__NR_dup3, oldfd, newfd, flags);
}
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
@@ -614,7 +635,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
{
int err;
- err = sys_dup2(tmp_fd, fixed_fd);
+ err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
err = err < 0 ? -errno : 0;
close(tmp_fd); /* clean up temporary FD */
return err;
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 0d4be829551b..9cd3d4109788 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -957,19 +957,33 @@ static int check_btf_str_off(__u32 *str_off, void *ctx)
static int linker_sanity_check_btf(struct src_obj *obj)
{
struct btf_type *t;
- int i, n, err = 0;
+ int i, n, err;
if (!obj->btf)
return 0;
n = btf__type_cnt(obj->btf);
for (i = 1; i < n; i++) {
+ struct btf_field_iter it;
+ __u32 *type_id, *str_off;
+
t = btf_type_by_id(obj->btf, i);
- err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
- err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+ while ((type_id = btf_field_iter_next(&it))) {
+ if (*type_id >= n)
+ return -EINVAL;
+ }
+
+ err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return err;
+ while ((str_off = btf_field_iter_next(&it))) {
+ if (!btf__str_by_offset(obj->btf, *str_off))
+ return -EINVAL;
+ }
}
return 0;
@@ -2213,10 +2227,17 @@ static int linker_fixup_btf(struct src_obj *obj)
vi = btf_var_secinfos(t);
for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
- const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
- int var_linkage = btf_var(vt)->linkage;
+ const char *var_name;
+ int var_linkage;
Elf64_Sym *sym;
+ /* could be a variable or function */
+ if (!btf_is_var(vt))
+ continue;
+
+ var_name = btf__str_by_offset(obj->btf, vt->name_off);
+ var_linkage = btf_var(vt)->linkage;
+
/* no need to patch up static or extern vars */
if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
continue;
@@ -2234,26 +2255,10 @@ static int linker_fixup_btf(struct src_obj *obj)
return 0;
}
-static int remap_type_id(__u32 *type_id, void *ctx)
-{
- int *id_map = ctx;
- int new_id = id_map[*type_id];
-
- /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
- if (new_id == 0 && *type_id != 0) {
- pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
- return -EINVAL;
- }
-
- *type_id = id_map[*type_id];
-
- return 0;
-}
-
static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
{
const struct btf_type *t;
- int i, j, n, start_id, id;
+ int i, j, n, start_id, id, err;
const char *name;
if (!obj->btf)
@@ -2324,9 +2329,25 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
n = btf__type_cnt(linker->btf);
for (i = start_id; i < n; i++) {
struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
+ struct btf_field_iter it;
+ __u32 *type_id;
- if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
- return -EINVAL;
+ err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS);
+ if (err)
+ return err;
+
+ while ((type_id = btf_field_iter_next(&it))) {
+ int new_id = obj->btf_type_map[*type_id];
+
+ /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
+ if (new_id == 0 && *type_id != 0) {
+ pr_warn("failed to find new ID mapping for original BTF type ID %u\n",
+ *type_id);
+ return -EINVAL;
+ }
+
+ *type_id = obj->btf_type_map[*type_id];
+ }
}
/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
diff --git a/tools/lib/list_sort.c b/tools/lib/list_sort.c
index 10c067e3a8d2..69affa251fa7 100644
--- a/tools/lib/list_sort.c
+++ b/tools/lib/list_sort.c
@@ -52,7 +52,6 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
struct list_head *a, struct list_head *b)
{
struct list_head *tail = head;
- u8 count = 0;
for (;;) {
/* if equal, take 'a' -- important for sort stability */
@@ -78,15 +77,6 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
/* Finish linking remainder of list b on to tail */
tail->next = b;
do {
- /*
- * If the merge is highly unbalanced (e.g. the input is
- * already sorted), this loop may run many iterations.
- * Continue callbacks to the client even though no
- * element comparison is needed, so the client's cmp()
- * routine can invoke cond_resched() periodically.
- */
- if (unlikely(!++count))
- cmp(priv, b, b);
b->prev = tail;
tail = b;
b = b->next;
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index ae64090184d3..37bb7771d914 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -77,6 +77,12 @@ struct perf_record_lost_samples {
__u64 lost;
};
+#define MAX_ID_HDR_ENTRIES 6
+struct perf_record_lost_samples_and_ids {
+ struct perf_record_lost_samples lost;
+ __u64 sample_ids[MAX_ID_HDR_ENTRIES];
+};
+
/*
* PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
*/
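One plausible use of the wrapper struct (an assumption on this editor's part, not spelled out in the patch): callers that synthesize lost-sample records can keep the record in a buffer with guaranteed room for the sample ids an id-header may append. A sketch, with header names as used under tools/lib/perf:

	#include <string.h>
	#include <linux/perf_event.h>
	#include <perf/event.h>

	/* Sketch only: a lost-samples record with room for trailing sample ids. */
	static void init_lost_record(struct perf_record_lost_samples_and_ids *buf)
	{
		struct perf_record_lost_samples *lost = &buf->lost;

		memset(buf, 0, sizeof(*buf));
		lost->header.type = PERF_RECORD_LOST_SAMPLES;
		/* up to MAX_ID_HDR_ENTRIES ids fit after *lost inside *buf */
	}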
diff --git a/tools/memory-model/Documentation/README b/tools/memory-model/Documentation/README
index db90a26dbdf4..304162743a5b 100644
--- a/tools/memory-model/Documentation/README
+++ b/tools/memory-model/Documentation/README
@@ -47,6 +47,10 @@ DESCRIPTION OF FILES
README
This file.
+access-marking.txt
+ Guidelines for marking intentionally concurrent accesses to
+ shared memory.
+
cheatsheet.txt
Quick-reference guide to the Linux-kernel memory model.
diff --git a/tools/memory-model/Documentation/access-marking.txt b/tools/memory-model/Documentation/access-marking.txt
index 65778222183e..3fbe77fd564a 100644
--- a/tools/memory-model/Documentation/access-marking.txt
+++ b/tools/memory-model/Documentation/access-marking.txt
@@ -6,7 +6,8 @@ normal accesses to shared memory, that is "normal" as in accesses that do
not use read-modify-write atomic operations. It also describes how to
document these accesses, both with comments and with special assertions
processed by the Kernel Concurrency Sanitizer (KCSAN). This discussion
-builds on an earlier LWN article [1].
+builds on an earlier LWN article [1] and Linux Foundation mentorship
+session [2].
ACCESS-MARKING OPTIONS
@@ -24,6 +25,11 @@ The Linux kernel provides the following access-marking options:
4. WRITE_ONCE(), for example, "WRITE_ONCE(a, b);"
The various forms of atomic_set() also fit in here.
+5. __data_racy, for example "int __data_racy a;"
+
+6. KCSAN's negative-marking assertions, ASSERT_EXCLUSIVE_ACCESS()
+ and ASSERT_EXCLUSIVE_WRITER(), are described in the
+ "ACCESS-DOCUMENTATION OPTIONS" section below.
These may be used in combination, as shown in this admittedly improbable
example:
@@ -31,7 +37,7 @@ example:
WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
Neither plain C-language accesses nor data_race() (#1 and #2 above) place
-any sort of constraint on the compiler's choice of optimizations [2].
+any sort of constraint on the compiler's choice of optimizations [3].
In contrast, READ_ONCE() and WRITE_ONCE() (#3 and #4 above) restrict the
compiler's use of code-motion and common-subexpression optimizations.
Therefore, if a given access is involved in an intentional data race,
@@ -205,6 +211,23 @@ because doing otherwise prevents KCSAN from detecting violations of your
code's synchronization rules.
+Use of __data_racy
+------------------
+
+Adding the __data_racy type qualifier to the declaration of a variable
+causes KCSAN to treat all accesses to that variable as if they were
+enclosed by data_race(). However, __data_racy does not affect the
+compiler, though one could imagine hardened kernel builds treating the
+__data_racy type qualifier as if it were the volatile keyword.
+
+Note well that __data_racy is subject to the same pointer-declaration
+rules as are other type qualifiers such as const and volatile.
+For example:
+
+ int __data_racy *p; // Pointer to data-racy data.
+ int *__data_racy p; // Data-racy pointer to non-data-racy data.
+
+
ACCESS-DOCUMENTATION OPTIONS
============================
@@ -342,7 +365,7 @@ as follows:
Because foo is read locklessly, all accesses are marked. The purpose
of the ASSERT_EXCLUSIVE_WRITER() is to allow KCSAN to check for a buggy
-concurrent lockless write.
+concurrent write, whether marked or not.
Lock-Protected Writes With Heuristic Lockless Reads
@@ -594,5 +617,8 @@ REFERENCES
[1] "Concurrency bugs should fear the big bad data-race detector (part 2)"
https://lwn.net/Articles/816854/
-[2] "Who's afraid of a big bad optimizing compiler?"
+[2] "The Kernel Concurrency Sanitizer"
+ https://www.linuxfoundation.org/webinars/the-kernel-concurrency-sanitizer
+
+[3] "Who's afraid of a big bad optimizing compiler?"
https://lwn.net/Articles/793253/
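As a concrete illustration of the new qualifier (a minimal sketch, assuming a statistics-only counter where occasional missed updates are acceptable):

	/* Every access to nr_retries is treated by KCSAN as if it were
	 * wrapped in data_race(); the generated code is unchanged.
	 */
	static int __data_racy nr_retries;	/* approximate, diagnostics only */

	void count_retry(void)
	{
		nr_retries++;		/* plain access, no KCSAN report */
	}

	int read_retries(void)
	{
		return nr_retries;	/* likewise */
	}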
diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
index 53b5a492739d..03c12efed66a 100644
--- a/tools/memory-model/lock.cat
+++ b/tools/memory-model/lock.cat
@@ -54,6 +54,12 @@ flag ~empty LKR \ domain(lk-rmw) as unpaired-LKR
*)
empty ([LKW] ; po-loc ; [LKR]) \ (po-loc ; [UL] ; po-loc) as lock-nest
+(*
+ * In the same way, spin_is_locked() inside a critical section must always
+ * return True (no RU events can be in a critical section for the same lock).
+ *)
+empty ([LKW] ; po-loc ; [RU]) \ (po-loc ; [UL] ; po-loc) as nested-is-locked
+
(* The final value of a spinlock should not be tested *)
flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final
@@ -79,42 +85,50 @@ empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks
(* rfi for LF events: link each LKW to the LF events in its critical section *)
let rfi-lf = ([LKW] ; po-loc ; [LF]) \ ([LKW] ; po-loc ; [UL] ; po-loc)
-(* rfe for LF events *)
+(* Utility macro to convert a single pair to a single-edge relation *)
+let pair-to-relation p = p ++ 0
+
+(*
+ * If a given LF event e is outside a critical section, it cannot read
+ * internally but it may read from an LKW event in another thread.
+ * Compute the relation containing these possible edges.
+ *)
+let possible-rfe-noncrit-lf e = (LKW * {e}) & loc & ext
+
+(* Compute set of sets of possible rfe edges for LF events *)
let all-possible-rfe-lf =
(*
- * Given an LF event r, compute the possible rfe edges for that event
- * (all those starting from LKW events in other threads),
- * and then convert that relation to a set of single-edge relations.
+ * Convert the possible-rfe-noncrit-lf relation for e
+ * to a set of single edges
*)
- let possible-rfe-lf r =
- let pair-to-relation p = p ++ 0
- in map pair-to-relation ((LKW * {r}) & loc & ext)
- (* Do this for each LF event r that isn't in rfi-lf *)
- in map possible-rfe-lf (LF \ range(rfi-lf))
+ let set-of-singleton-rfe-lf e =
+ map pair-to-relation (possible-rfe-noncrit-lf e)
+ (* Do this for each LF event e that isn't in rfi-lf *)
+ in map set-of-singleton-rfe-lf (LF \ range(rfi-lf))
(* Generate all rf relations for LF events *)
with rfe-lf from cross(all-possible-rfe-lf)
let rf-lf = rfe-lf | rfi-lf
(*
- * RU, i.e., spin_is_locked() returning False, is slightly different.
- * We rely on the memory model to rule out cases where spin_is_locked()
- * within one of the lock's critical sections returns False.
+ * A given RU event e may read internally from the last po-previous UL,
+ * or it may read from a UL event in another thread or the initial write.
+ * Compute the relation containing these possible edges.
*)
-
-(* rfi for RU events: an RU may read from the last po-previous UL *)
-let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
-
-(* rfe for RU events: an RU may read from an external UL or the initial write *)
-let all-possible-rfe-ru =
- let possible-rfe-ru r =
- let pair-to-relation p = p ++ 0
- in map pair-to-relation (((UL | IW) * {r}) & loc & ext)
- in map possible-rfe-ru RU
+let possible-rf-ru e = (((UL * {e}) & po-loc) \
+ ([UL] ; po-loc ; [UL] ; po-loc)) |
+ (((UL | IW) * {e}) & loc & ext)
+
+(* Compute set of sets of possible rf edges for RU events *)
+let all-possible-rf-ru =
+ (* Convert the possible-rf-ru relation for e to a set of single edges *)
+ let set-of-singleton-rf-ru e =
+ map pair-to-relation (possible-rf-ru e)
+ (* Do this for each RU event e *)
+ in map set-of-singleton-rf-ru RU
(* Generate all rf relations for RU events *)
-with rfe-ru from cross(all-possible-rfe-ru)
-let rf-ru = rfe-ru | rfi-ru
+with rf-ru from cross(all-possible-rf-ru)
(* Final rf relation *)
let rf = rf | rf-lf | rf-ru
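To see the new nested-is-locked constraint in action, a small litmus-test sketch (written for this note, not part of the patch); with the rule above, herd7 should find the exists clause unsatisfiable, since spin_is_locked() inside the lock's own critical section can no longer observe the lock as unlocked:

	C nested-is-locked-example

	{}

	P0(spinlock_t *s, int *x)
	{
		int r0;

		spin_lock(s);
		WRITE_ONCE(*x, 1);
		r0 = spin_is_locked(s);
		spin_unlock(s);
	}

	exists (0:r0=0)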
diff --git a/tools/mm/Makefile b/tools/mm/Makefile
index 7bb03606b9ea..15791c1c5b28 100644
--- a/tools/mm/Makefile
+++ b/tools/mm/Makefile
@@ -3,7 +3,7 @@
#
include ../scripts/Makefile.include
-BUILD_TARGETS=page-types slabinfo page_owner_sort
+BUILD_TARGETS=page-types slabinfo page_owner_sort thp_swap_allocator_test
INSTALL_TARGETS = $(BUILD_TARGETS) thpmaps
LIB_DIR = ../lib/api
diff --git a/tools/mm/thp_swap_allocator_test.c b/tools/mm/thp_swap_allocator_test.c
new file mode 100644
index 000000000000..83afc52275a5
--- /dev/null
+++ b/tools/mm/thp_swap_allocator_test.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * thp_swap_allocator_test
+ *
+ * The purpose of this test program is to help check whether THP swpout
+ * can correctly get swap slots to swap out as a whole instead of
+ * being split. It randomly releases swap entries through madvise
+ * DONTNEED and swaps in/out on two memory areas: one area for
+ * 64KB THP and the other for small folios. The second memory
+ * area can be enabled with "-s".
+ * Before running the program, we need to set up a zRAM or similar
+ * swap device by:
+ * echo lzo > /sys/block/zram0/comp_algorithm
+ * echo 64M > /sys/block/zram0/disksize
+ * echo never > /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
+ * echo always > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/enabled
+ * mkswap /dev/zram0
+ * swapon /dev/zram0
+ * The expected result should be 0% anon swpout fallback ratio w/ or
+ * w/o "-s".
+ *
+ * Author(s): Barry Song <v-songbaohua@oppo.com>
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <linux/mman.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <time.h>
+
+#define MEMSIZE_MTHP (60 * 1024 * 1024)
+#define MEMSIZE_SMALLFOLIO (4 * 1024 * 1024)
+#define ALIGNMENT_MTHP (64 * 1024)
+#define ALIGNMENT_SMALLFOLIO (4 * 1024)
+#define TOTAL_DONTNEED_MTHP (16 * 1024 * 1024)
+#define TOTAL_DONTNEED_SMALLFOLIO (1 * 1024 * 1024)
+#define MTHP_FOLIO_SIZE (64 * 1024)
+
+#define SWPOUT_PATH \
+ "/sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/swpout"
+#define SWPOUT_FALLBACK_PATH \
+ "/sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/swpout_fallback"
+
+static void *aligned_alloc_mem(size_t size, size_t alignment)
+{
+ void *mem = NULL;
+
+ if (posix_memalign(&mem, alignment, size) != 0) {
+ perror("posix_memalign");
+ return NULL;
+ }
+ return mem;
+}
+
+/*
+ * This emulates the behavior of native libc and Java heap,
+ * as well as process exit and munmap. It helps generate mTHP
+ * and ensures that iterations can proceed with mTHP, as large
+ * folio swap-in isn't supported yet.
+ */
+static void random_madvise_dontneed(void *mem, size_t mem_size,
+ size_t align_size, size_t total_dontneed_size)
+{
+ size_t num_pages = total_dontneed_size / align_size;
+ size_t i;
+ size_t offset;
+ void *addr;
+
+ for (i = 0; i < num_pages; ++i) {
+ offset = (rand() % (mem_size / align_size)) * align_size;
+ addr = (char *)mem + offset;
+ if (madvise(addr, align_size, MADV_DONTNEED) != 0)
+ perror("madvise dontneed");
+
+ memset(addr, 0x11, align_size);
+ }
+}
+
+static void random_swapin(void *mem, size_t mem_size,
+ size_t align_size, size_t total_swapin_size)
+{
+ size_t num_pages = total_swapin_size / align_size;
+ size_t i;
+ size_t offset;
+ void *addr;
+
+ for (i = 0; i < num_pages; ++i) {
+ offset = (rand() % (mem_size / align_size)) * align_size;
+ addr = (char *)mem + offset;
+ memset(addr, 0x11, align_size);
+ }
+}
+
+static unsigned long read_stat(const char *path)
+{
+ FILE *file;
+ unsigned long value;
+
+ file = fopen(path, "r");
+ if (!file) {
+ perror("fopen");
+ return 0;
+ }
+
+ if (fscanf(file, "%lu", &value) != 1) {
+ perror("fscanf");
+ fclose(file);
+ return 0;
+ }
+
+ fclose(file);
+ return value;
+}
+
+int main(int argc, char *argv[])
+{
+ int use_small_folio = 0, aligned_swapin = 0;
+ void *mem1 = NULL, *mem2 = NULL;
+ int i;
+
+ for (i = 1; i < argc; ++i) {
+ if (strcmp(argv[i], "-s") == 0)
+ use_small_folio = 1;
+ else if (strcmp(argv[i], "-a") == 0)
+ aligned_swapin = 1;
+ }
+
+ mem1 = aligned_alloc_mem(MEMSIZE_MTHP, ALIGNMENT_MTHP);
+ if (mem1 == NULL) {
+ fprintf(stderr, "Failed to allocate large folios memory\n");
+ return EXIT_FAILURE;
+ }
+
+ if (madvise(mem1, MEMSIZE_MTHP, MADV_HUGEPAGE) != 0) {
+ perror("madvise hugepage for mem1");
+ free(mem1);
+ return EXIT_FAILURE;
+ }
+
+ if (use_small_folio) {
+ mem2 = aligned_alloc_mem(MEMSIZE_SMALLFOLIO, ALIGNMENT_MTHP);
+ if (mem2 == NULL) {
+ fprintf(stderr, "Failed to allocate small folios memory\n");
+ free(mem1);
+ return EXIT_FAILURE;
+ }
+
+ if (madvise(mem2, MEMSIZE_SMALLFOLIO, MADV_NOHUGEPAGE) != 0) {
+ perror("madvise nohugepage for mem2");
+ free(mem1);
+ free(mem2);
+ return EXIT_FAILURE;
+ }
+ }
+
+ /* warm-up phase to occupy the swapfile */
+ memset(mem1, 0x11, MEMSIZE_MTHP);
+ madvise(mem1, MEMSIZE_MTHP, MADV_PAGEOUT);
+ if (use_small_folio) {
+ memset(mem2, 0x11, MEMSIZE_SMALLFOLIO);
+ madvise(mem2, MEMSIZE_SMALLFOLIO, MADV_PAGEOUT);
+ }
+
+ /* iterations with newly created mTHP, swap-in, and swap-out */
+ for (i = 0; i < 100; ++i) {
+ unsigned long initial_swpout;
+ unsigned long initial_swpout_fallback;
+ unsigned long final_swpout;
+ unsigned long final_swpout_fallback;
+ unsigned long swpout_inc;
+ unsigned long swpout_fallback_inc;
+ double fallback_percentage;
+
+ initial_swpout = read_stat(SWPOUT_PATH);
+ initial_swpout_fallback = read_stat(SWPOUT_FALLBACK_PATH);
+
+ /*
+ * The following setup creates a 1:1 ratio of mTHP to small folios
+ * since large folio swap-in isn't supported yet. Once we support
+ * mTHP swap-in, we'll likely need to reduce MEMSIZE_MTHP and
+ * increase MEMSIZE_SMALLFOLIO to maintain the ratio.
+ */
+ random_swapin(mem1, MEMSIZE_MTHP,
+ aligned_swapin ? ALIGNMENT_MTHP : ALIGNMENT_SMALLFOLIO,
+ TOTAL_DONTNEED_MTHP);
+ random_madvise_dontneed(mem1, MEMSIZE_MTHP, ALIGNMENT_MTHP,
+ TOTAL_DONTNEED_MTHP);
+
+ if (use_small_folio) {
+ random_swapin(mem2, MEMSIZE_SMALLFOLIO,
+ ALIGNMENT_SMALLFOLIO,
+ TOTAL_DONTNEED_SMALLFOLIO);
+ }
+
+ if (madvise(mem1, MEMSIZE_MTHP, MADV_PAGEOUT) != 0) {
+ perror("madvise pageout for mem1");
+ free(mem1);
+ if (mem2 != NULL)
+ free(mem2);
+ return EXIT_FAILURE;
+ }
+
+ if (use_small_folio) {
+ if (madvise(mem2, MEMSIZE_SMALLFOLIO, MADV_PAGEOUT) != 0) {
+ perror("madvise pageout for mem2");
+ free(mem1);
+ free(mem2);
+ return EXIT_FAILURE;
+ }
+ }
+
+ final_swpout = read_stat(SWPOUT_PATH);
+ final_swpout_fallback = read_stat(SWPOUT_FALLBACK_PATH);
+
+ swpout_inc = final_swpout - initial_swpout;
+ swpout_fallback_inc = final_swpout_fallback - initial_swpout_fallback;
+
+ fallback_percentage = (double)swpout_fallback_inc /
+ (swpout_fallback_inc + swpout_inc) * 100;
+
+ printf("Iteration %d: swpout inc: %lu, swpout fallback inc: %lu, Fallback percentage: %.2f%%\n",
+ i + 1, swpout_inc, swpout_fallback_inc, fallback_percentage);
+ }
+
+ free(mem1);
+ if (mem2 != NULL)
+ free(mem2);
+
+ return EXIT_SUCCESS;
+}
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index 8e9e09d84e26..d1cdf2a8f826 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -2,9 +2,12 @@
SUBDIRS = lib generated samples
-all: $(SUBDIRS)
+all: $(SUBDIRS) libynl.a
samples: | lib generated
+libynl.a: | lib generated
+ @echo -e "\tAR $@"
+ @ar rcs $@ lib/ynl.o generated/*-user.o
$(SUBDIRS):
@if [ -f "$@/Makefile" ] ; then \
@@ -17,5 +20,6 @@ clean distclean:
$(MAKE) -C $$dir $@; \
fi \
done
+ rm -f libynl.a
.PHONY: all clean distclean $(SUBDIRS)
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
index f4e8eb79c1b8..0712b5e82eb7 100644
--- a/tools/net/ynl/Makefile.deps
+++ b/tools/net/ynl/Makefile.deps
@@ -16,7 +16,8 @@ get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2)
CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
CFLAGS_dpll:=$(call get_hdr_inc,_LINUX_DPLL_H,dpll.h)
-CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
+CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_H,ethtool.h) \
+ $(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)
CFLAGS_mptcp_pm:=$(call get_hdr_inc,_LINUX_MPTCP_PM_H,mptcp_pm.h)
CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h)
@@ -25,3 +26,4 @@ CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_NETLINK_H,nfsd_netlink.h)
CFLAGS_ovs_datapath:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
CFLAGS_ovs_flow:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
CFLAGS_ovs_vport:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
+CFLAGS_tcp_metrics:=$(call get_hdr_inc,_LINUX_TCP_METRICS_H,tcp_metrics.h)
diff --git a/tools/net/ynl/lib/Makefile b/tools/net/ynl/lib/Makefile
index dfff3ecd1cba..2887cc5de530 100644
--- a/tools/net/ynl/lib/Makefile
+++ b/tools/net/ynl/lib/Makefile
@@ -14,7 +14,9 @@ include $(wildcard *.d)
all: ynl.a
ynl.a: $(OBJS)
- ar rcs $@ $(OBJS)
+ @echo -e "\tAR $@"
+ @ar rcs $@ $(OBJS)
+
clean:
rm -f *.o *.d *~
rm -rf __pycache__
diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h
index 6cf890080dc0..3c09a7bbfba5 100644
--- a/tools/net/ynl/lib/ynl-priv.h
+++ b/tools/net/ynl/lib/ynl-priv.h
@@ -45,17 +45,17 @@ struct ynl_policy_attr {
enum ynl_policy_type type;
unsigned int len;
const char *name;
- struct ynl_policy_nest *nest;
+ const struct ynl_policy_nest *nest;
};
struct ynl_policy_nest {
unsigned int max_attr;
- struct ynl_policy_attr *table;
+ const struct ynl_policy_attr *table;
};
struct ynl_parse_arg {
struct ynl_sock *ys;
- struct ynl_policy_nest *rsp_policy;
+ const struct ynl_policy_nest *rsp_policy;
void *data;
};
@@ -79,7 +79,7 @@ static inline void *ynl_dump_obj_next(void *obj)
struct ynl_dump_list_type *list;
uptr -= offsetof(struct ynl_dump_list_type, data);
- list = (void *)uptr;
+ list = (struct ynl_dump_list_type *)uptr;
uptr = (unsigned long)list->next;
uptr += offsetof(struct ynl_dump_list_type, data);
@@ -119,7 +119,7 @@ struct ynl_dump_state {
};
struct ynl_ntf_info {
- struct ynl_policy_nest *policy;
+ const struct ynl_policy_nest *policy;
ynl_parse_cb_t cb;
size_t alloc_sz;
void (*free)(struct ynl_ntf_base_type *ntf);
@@ -139,7 +139,7 @@ int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg);
static inline struct nlmsghdr *ynl_nlmsg_put_header(void *buf)
{
- struct nlmsghdr *nlh = buf;
+ struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
memset(nlh, 0, sizeof(*nlh));
nlh->nlmsg_len = NLMSG_HDRLEN;
@@ -196,7 +196,7 @@ static inline void *ynl_attr_data(const struct nlattr *attr)
static inline void *ynl_attr_data_end(const struct nlattr *attr)
{
- return ynl_attr_data(attr) + ynl_attr_data_len(attr);
+ return (char *)ynl_attr_data(attr) + ynl_attr_data_len(attr);
}
#define ynl_attr_for_each(attr, nlh, fixed_hdr_sz) \
@@ -228,7 +228,7 @@ ynl_attr_next(const void *end, const struct nlattr *prev)
{
struct nlattr *attr;
- attr = (void *)((char *)prev + NLA_ALIGN(prev->nla_len));
+ attr = (struct nlattr *)((char *)prev + NLA_ALIGN(prev->nla_len));
return ynl_attr_if_good(end, attr);
}
@@ -237,8 +237,8 @@ ynl_attr_first(const void *start, size_t len, size_t skip)
{
struct nlattr *attr;
- attr = (void *)((char *)start + NLMSG_ALIGN(skip));
- return ynl_attr_if_good(start + len, attr);
+ attr = (struct nlattr *)((char *)start + NLMSG_ALIGN(skip));
+ return ynl_attr_if_good((char *)start + len, attr);
}
static inline bool
@@ -262,9 +262,9 @@ ynl_attr_nest_start(struct nlmsghdr *nlh, unsigned int attr_type)
struct nlattr *attr;
if (__ynl_attr_put_overflow(nlh, 0))
- return ynl_nlmsg_end_addr(nlh) - NLA_HDRLEN;
+ return (struct nlattr *)ynl_nlmsg_end_addr(nlh) - 1;
- attr = ynl_nlmsg_end_addr(nlh);
+ attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh);
attr->nla_type = attr_type | NLA_F_NESTED;
nlh->nlmsg_len += NLA_HDRLEN;
@@ -286,7 +286,7 @@ ynl_attr_put(struct nlmsghdr *nlh, unsigned int attr_type,
if (__ynl_attr_put_overflow(nlh, size))
return;
- attr = ynl_nlmsg_end_addr(nlh);
+ attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh);
attr->nla_type = attr_type;
attr->nla_len = NLA_HDRLEN + size;
@@ -305,10 +305,10 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
if (__ynl_attr_put_overflow(nlh, len))
return;
- attr = ynl_nlmsg_end_addr(nlh);
+ attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh);
attr->nla_type = attr_type;
- strcpy(ynl_attr_data(attr), str);
+ strcpy((char *)ynl_attr_data(attr), str);
attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len);
nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index 4b9c091fc86b..fcb18a5a6d70 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -46,7 +46,7 @@
/* -- Netlink boiler plate */
static int
-ynl_err_walk_report_one(struct ynl_policy_nest *policy, unsigned int type,
+ynl_err_walk_report_one(const struct ynl_policy_nest *policy, unsigned int type,
char *str, int str_sz, int *n)
{
if (!policy) {
@@ -75,8 +75,8 @@ ynl_err_walk_report_one(struct ynl_policy_nest *policy, unsigned int type,
static int
ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
- struct ynl_policy_nest *policy, char *str, int str_sz,
- struct ynl_policy_nest **nest_pol)
+ const struct ynl_policy_nest *policy, char *str, int str_sz,
+ const struct ynl_policy_nest **nest_pol)
{
unsigned int astart_off, aend_off;
const struct nlattr *attr;
@@ -206,7 +206,7 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
bad_attr[n] = '\0';
}
if (tb[NLMSGERR_ATTR_MISS_TYPE]) {
- struct ynl_policy_nest *nest_pol = NULL;
+ const struct ynl_policy_nest *nest_pol = NULL;
unsigned int n, off, type;
void *start, *end;
int n2;
@@ -296,7 +296,7 @@ static int ynl_cb_done(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
{
- struct ynl_policy_attr *policy;
+ const struct ynl_policy_attr *policy;
unsigned int type, len;
unsigned char *data;
diff --git a/tools/net/ynl/lib/ynl.h b/tools/net/ynl/lib/ynl.h
index eef7c6324ed4..6cd570b283ea 100644
--- a/tools/net/ynl/lib/ynl.h
+++ b/tools/net/ynl/lib/ynl.h
@@ -76,7 +76,7 @@ struct ynl_sock {
struct ynl_ntf_base_type **ntf_last_next;
struct nlmsghdr *nlh;
- struct ynl_policy_nest *req_policy;
+ const struct ynl_policy_nest *req_policy;
unsigned char *tx_buf;
unsigned char *rx_buf;
unsigned char raw_buf[];
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 35e666928119..d42c1d605969 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -743,6 +743,8 @@ class YnlFamily(SpecFamily):
decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order)
if 'enum' in attr_spec:
decoded = self._decode_enum(decoded, attr_spec)
+ elif attr_spec.display_hint:
+ decoded = self._formatted_string(decoded, attr_spec.display_hint)
elif attr_spec["type"] == 'indexed-array':
decoded = self._decode_array_attr(attr, attr_spec)
elif attr_spec["type"] == 'bitfield32':
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index a42d62b23ee0..51529fabd517 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -59,9 +59,9 @@ class Type(SpecAttr):
if 'nested-attributes' in attr:
self.nested_attrs = attr['nested-attributes']
if self.nested_attrs == family.name:
- self.nested_render_name = c_lower(f"{family.name}")
+ self.nested_render_name = c_lower(f"{family.ident_name}")
else:
- self.nested_render_name = c_lower(f"{family.name}_{self.nested_attrs}")
+ self.nested_render_name = c_lower(f"{family.ident_name}_{self.nested_attrs}")
if self.nested_attrs in self.family.consts:
self.nested_struct_type = 'struct ' + self.nested_render_name + '_'
@@ -693,9 +693,9 @@ class Struct:
self.nested = type_list is None
if family.name == c_lower(space_name):
- self.render_name = c_lower(family.name)
+ self.render_name = c_lower(family.ident_name)
else:
- self.render_name = c_lower(family.name + '-' + space_name)
+ self.render_name = c_lower(family.ident_name + '-' + space_name)
self.struct_name = 'struct ' + self.render_name
if self.nested and space_name in family.consts:
self.struct_name += '_'
@@ -761,7 +761,7 @@ class EnumEntry(SpecEnumEntry):
class EnumSet(SpecEnumSet):
def __init__(self, family, yaml):
- self.render_name = c_lower(family.name + '-' + yaml['name'])
+ self.render_name = c_lower(family.ident_name + '-' + yaml['name'])
if 'enum-name' in yaml:
if yaml['enum-name']:
@@ -777,7 +777,7 @@ class EnumSet(SpecEnumSet):
else:
self.user_type = 'int'
- self.value_pfx = yaml.get('name-prefix', f"{family.name}-{yaml['name']}-")
+ self.value_pfx = yaml.get('name-prefix', f"{family.ident_name}-{yaml['name']}-")
super().__init__(family, yaml)
@@ -802,9 +802,9 @@ class AttrSet(SpecAttrSet):
if 'name-prefix' in yaml:
pfx = yaml['name-prefix']
elif self.name == family.name:
- pfx = family.name + '-a-'
+ pfx = family.ident_name + '-a-'
else:
- pfx = f"{family.name}-a-{self.name}-"
+ pfx = f"{family.ident_name}-a-{self.name}-"
self.name_prefix = c_upper(pfx)
self.max_name = c_upper(self.yaml.get('attr-max-name', f"{self.name_prefix}max"))
self.cnt_name = c_upper(self.yaml.get('attr-cnt-name', f"__{self.name_prefix}max"))
@@ -861,7 +861,7 @@ class Operation(SpecOperation):
def __init__(self, family, yaml, req_value, rsp_value):
super().__init__(family, yaml, req_value, rsp_value)
- self.render_name = c_lower(family.name + '_' + self.name)
+ self.render_name = c_lower(family.ident_name + '_' + self.name)
self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \
('dump' in yaml and 'request' in yaml['dump'])
@@ -911,11 +911,11 @@ class Family(SpecFamily):
if 'uapi-header' in self.yaml:
self.uapi_header = self.yaml['uapi-header']
else:
- self.uapi_header = f"linux/{self.name}.h"
+ self.uapi_header = f"linux/{self.ident_name}.h"
if self.uapi_header.startswith("linux/") and self.uapi_header.endswith('.h'):
self.uapi_header_name = self.uapi_header[6:-2]
else:
- self.uapi_header_name = self.name
+ self.uapi_header_name = self.ident_name
def resolve(self):
self.resolve_up(super())
@@ -923,7 +923,7 @@ class Family(SpecFamily):
if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}:
raise Exception("Codegen only supported for genetlink")
- self.c_name = c_lower(self.name)
+ self.c_name = c_lower(self.ident_name)
if 'name-prefix' in self.yaml['operations']:
self.op_prefix = c_upper(self.yaml['operations']['name-prefix'])
else:
@@ -1507,12 +1507,12 @@ def print_dump_prototype(ri):
def put_typol_fwd(cw, struct):
- cw.p(f'extern struct ynl_policy_nest {struct.render_name}_nest;')
+ cw.p(f'extern const struct ynl_policy_nest {struct.render_name}_nest;')
def put_typol(cw, struct):
type_max = struct.attr_set.max_name
- cw.block_start(line=f'struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
+ cw.block_start(line=f'const struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
for _, arg in struct.member_list():
arg.attr_typol(cw)
@@ -1520,7 +1520,7 @@ def put_typol(cw, struct):
cw.block_end(line=';')
cw.nl()
- cw.block_start(line=f'struct ynl_policy_nest {struct.render_name}_nest =')
+ cw.block_start(line=f'const struct ynl_policy_nest {struct.render_name}_nest =')
cw.p(f'.max_attr = {type_max},')
cw.p(f'.table = {struct.render_name}_policy,')
cw.block_end(line=';')
@@ -2173,7 +2173,7 @@ def print_kernel_op_table_fwd(family, cw, terminate):
exported = not kernel_can_gen_family_struct(family)
if not terminate or exported:
- cw.p(f"/* Ops table for {family.name} */")
+ cw.p(f"/* Ops table for {family.ident_name} */")
pol_to_struct = {'global': 'genl_small_ops',
'per-op': 'genl_ops',
@@ -2225,12 +2225,12 @@ def print_kernel_op_table_fwd(family, cw, terminate):
continue
if 'do' in op:
- name = c_lower(f"{family.name}-nl-{op_name}-doit")
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-doit")
cw.write_func_prot('int', name,
['struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
if 'dump' in op:
- name = c_lower(f"{family.name}-nl-{op_name}-dumpit")
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-dumpit")
cw.write_func_prot('int', name,
['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';')
cw.nl()
@@ -2256,13 +2256,13 @@ def print_kernel_op_table(family, cw):
for x in op['dont-validate']])), )
for op_mode in ['do', 'dump']:
if op_mode in op:
- name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
members.append((op_mode + 'it', name))
if family.kernel_policy == 'per-op':
struct = Struct(family, op['attribute-set'],
type_list=op['do']['request']['attributes'])
- name = c_lower(f"{family.name}-{op_name}-nl-policy")
+ name = c_lower(f"{family.ident_name}-{op_name}-nl-policy")
members.append(('policy', name))
members.append(('maxattr', struct.attr_max_val.enum_name))
if 'flags' in op:
@@ -2294,7 +2294,7 @@ def print_kernel_op_table(family, cw):
members.append(('validate',
' | '.join([c_upper('genl-dont-validate-' + x)
for x in dont_validate])), )
- name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
if 'pre' in op[op_mode]:
members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))
members.append((op_mode + 'it', name))
@@ -2305,9 +2305,9 @@ def print_kernel_op_table(family, cw):
type_list=op[op_mode]['request']['attributes'])
if op.dual_policy:
- name = c_lower(f"{family.name}-{op_name}-{op_mode}-nl-policy")
+ name = c_lower(f"{family.ident_name}-{op_name}-{op_mode}-nl-policy")
else:
- name = c_lower(f"{family.name}-{op_name}-nl-policy")
+ name = c_lower(f"{family.ident_name}-{op_name}-nl-policy")
members.append(('policy', name))
members.append(('maxattr', struct.attr_max_val.enum_name))
flags = (op['flags'] if 'flags' in op else []) + ['cmd-cap-' + op_mode]
@@ -2326,7 +2326,7 @@ def print_kernel_mcgrp_hdr(family, cw):
cw.block_start('enum')
for grp in family.mcgrps['list']:
- grp_id = c_upper(f"{family.name}-nlgrp-{grp['name']},")
+ grp_id = c_upper(f"{family.ident_name}-nlgrp-{grp['name']},")
cw.p(grp_id)
cw.block_end(';')
cw.nl()
@@ -2339,7 +2339,7 @@ def print_kernel_mcgrp_src(family, cw):
cw.block_start('static const struct genl_multicast_group ' + family.c_name + '_nl_mcgrps[] =')
for grp in family.mcgrps['list']:
name = grp['name']
- grp_id = c_upper(f"{family.name}-nlgrp-{name}")
+ grp_id = c_upper(f"{family.ident_name}-nlgrp-{name}")
cw.p('[' + grp_id + '] = { "' + name + '", },')
cw.block_end(';')
cw.nl()
@@ -2361,7 +2361,7 @@ def print_kernel_family_struct_src(family, cw):
if not kernel_can_gen_family_struct(family):
return
- cw.block_start(f"struct genl_family {family.name}_nl_family __ro_after_init =")
+ cw.block_start(f"struct genl_family {family.ident_name}_nl_family __ro_after_init =")
cw.p('.name\t\t= ' + family.fam_key + ',')
cw.p('.version\t= ' + family.ver_key + ',')
cw.p('.netnsok\t= true,')
@@ -2429,7 +2429,7 @@ def render_uapi(family, cw):
cw.p(' */')
uapi_enum_start(family, cw, const, 'name')
- name_pfx = const.get('name-prefix', f"{family.name}-{const['name']}-")
+ name_pfx = const.get('name-prefix', f"{family.ident_name}-{const['name']}-")
for entry in enum.entries.values():
suffix = ','
if entry.value_change:
@@ -2451,7 +2451,7 @@ def render_uapi(family, cw):
cw.nl()
elif const['type'] == 'const':
defines.append([c_upper(family.get('c-define-name',
- f"{family.name}-{const['name']}")),
+ f"{family.ident_name}-{const['name']}")),
const['value']])
if defines:
@@ -2529,7 +2529,7 @@ def render_uapi(family, cw):
defines = []
for grp in family.mcgrps['list']:
name = grp['name']
- defines.append([c_upper(grp.get('c-define-name', f"{family.name}-mcgrp-{name}")),
+ defines.append([c_upper(grp.get('c-define-name', f"{family.ident_name}-mcgrp-{name}")),
f'{name}'])
cw.nl()
if defines:
diff --git a/tools/net/ynl/ynl-gen-rst.py b/tools/net/ynl/ynl-gen-rst.py
index 657e881d2ea4..6c56d0d726b4 100755
--- a/tools/net/ynl/ynl-gen-rst.py
+++ b/tools/net/ynl/ynl-gen-rst.py
@@ -49,7 +49,7 @@ def inline(text: str) -> str:
def sanitize(text: str) -> str:
"""Remove newlines and multiple spaces"""
# This is useful for some fields that are spread across multiple lines
- return str(text).replace("\n", "").strip()
+ return str(text).replace("\n", " ").strip()
def rst_fields(key: str, value: str, level: int = 0) -> str:
@@ -156,7 +156,10 @@ def parse_do(do_dict: Dict[str, Any], level: int = 0) -> str:
lines = []
for key in do_dict.keys():
lines.append(rst_paragraph(bold(key), level + 1))
- lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n")
+ if key in ['request', 'reply']:
+ lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n")
+ else:
+ lines.append(headroom(level + 2) + do_dict[key] + "\n")
return "\n".join(lines)
@@ -172,13 +175,13 @@ def parse_do_attributes(attrs: Dict[str, Any], level: int = 0) -> str:
def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str:
"""Parse operations block"""
- preprocessed = ["name", "doc", "title", "do", "dump"]
+ preprocessed = ["name", "doc", "title", "do", "dump", "flags"]
linkable = ["fixed-header", "attribute-set"]
lines = []
for operation in operations:
lines.append(rst_section(namespace, 'operation', operation["name"]))
- lines.append(rst_paragraph(sanitize(operation["doc"])) + "\n")
+ lines.append(rst_paragraph(operation["doc"]) + "\n")
for key in operation.keys():
if key in preprocessed:
@@ -188,6 +191,8 @@ def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str:
if key in linkable:
value = rst_ref(namespace, key, value)
lines.append(rst_fields(key, value, 0))
+ if 'flags' in operation:
+ lines.append(rst_fields('flags', rst_list_inline(operation['flags'])))
if "do" in operation:
lines.append(rst_paragraph(":do:", 0))
diff --git a/tools/objtool/Documentation/objtool.txt b/tools/objtool/Documentation/objtool.txt
index fe39c2a8ef0d..7c3ee959b63c 100644
--- a/tools/objtool/Documentation/objtool.txt
+++ b/tools/objtool/Documentation/objtool.txt
@@ -284,6 +284,25 @@ the objtool maintainers.
Otherwise the stack frame may not get created before the call.
+ objtool can help with pinpointing the exact function where it happens:
+
+ $ OBJTOOL_ARGS="--verbose" make arch/x86/kvm/
+
+ arch/x86/kvm/kvm.o: warning: objtool: .altinstr_replacement+0xc5: call without frame pointer save/setup
+ arch/x86/kvm/kvm.o: warning: objtool: em_loop.part.0+0x29: (alt)
+ arch/x86/kvm/kvm.o: warning: objtool: em_loop.part.0+0x0: <=== (sym)
+ LD [M] arch/x86/kvm/kvm-intel.o
+ 0000 0000000000028220 <em_loop.part.0>:
+ 0000 28220: 0f b6 47 61 movzbl 0x61(%rdi),%eax
+ 0004 28224: 3c e2 cmp $0xe2,%al
+ 0006 28226: 74 2c je 28254 <em_loop.part.0+0x34>
+ 0008 28228: 48 8b 57 10 mov 0x10(%rdi),%rdx
+ 000c 2822c: 83 f0 05 xor $0x5,%eax
+ 000f 2822f: 48 c1 e0 04 shl $0x4,%rax
+ 0013 28233: 25 f0 00 00 00 and $0xf0,%eax
+ 0018 28238: 81 e2 d5 08 00 00 and $0x8d5,%edx
+ 001e 2823e: 80 ce 02 or $0x2,%dh
+ ...
2. file.o: warning: objtool: .text+0x53: unreachable instruction
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 3a1d80a7878d..ed6bff0e01dc 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -125,8 +125,14 @@ bool arch_pc_relative_reloc(struct reloc *reloc)
#define is_RIP() ((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
#define have_SIB() ((modrm_rm & 7) == CFI_SP && mod_is_mem())
+/*
+ * Check the ModRM register. If there is a SIB byte then check with
+ * the SIB base register. But if the SIB base is 5 (i.e. CFI_BP) and
+ * ModRM mod is 0 then there is no base register.
+ */
#define rm_is(reg) (have_SIB() ? \
- sib_base == (reg) && sib_index == CFI_SP : \
+ sib_base == (reg) && sib_index == CFI_SP && \
+ (sib_base != CFI_BP || modrm_mod != 0) : \
modrm_rm == (reg))
#define rm_is_mem(reg) (mod_is_mem() && !is_RIP() && rm_is(reg))
diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
index 4134d27c696b..4ea0f9815fda 100644
--- a/tools/objtool/arch/x86/special.c
+++ b/tools/objtool/arch/x86/special.c
@@ -9,6 +9,29 @@
void arch_handle_alternative(unsigned short feature, struct special_alt *alt)
{
+ static struct special_alt *group, *prev;
+
+ /*
+ * Recompute orig_len for nested ALTERNATIVE()s.
+ */
+ if (group && group->orig_sec == alt->orig_sec &&
+ group->orig_off == alt->orig_off) {
+
+ struct special_alt *iter = group;
+ for (;;) {
+ unsigned int len = max(iter->orig_len, alt->orig_len);
+ iter->orig_len = alt->orig_len = len;
+
+ if (iter == prev)
+ break;
+
+ iter = list_next_entry(iter, list);
+ }
+
+ } else group = alt;
+
+ prev = alt;
+
switch (feature) {
case X86_FEATURE_SMAP:
/*
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 5e21cfb7661d..387d56a7f5fb 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -144,7 +144,7 @@ static bool opts_valid(void)
opts.static_call ||
opts.uaccess) {
if (opts.dump_orc) {
- ERROR("--dump can't be combined with other options");
+ ERROR("--dump can't be combined with other actions");
return false;
}
@@ -159,7 +159,7 @@ static bool opts_valid(void)
if (opts.dump_orc)
return true;
- ERROR("At least one command required");
+ ERROR("At least one action required");
return false;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 0a33d9195b7a..01237d167223 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1202,6 +1202,8 @@ static const char *uaccess_safe_builtin[] = {
"__sanitizer_cov_trace_switch",
/* KMSAN */
"kmsan_copy_to_user",
+ "kmsan_disable_current",
+ "kmsan_enable_current",
"kmsan_report",
"kmsan_unpoison_entry_regs",
"kmsan_unpoison_memory",
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index 7ebf29c91184..1e8141ef1b15 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -7,12 +7,16 @@
* Yes, this is unfortunate. A better solution is in the works.
*/
NORETURN(__fortify_panic)
+NORETURN(__ia32_sys_exit)
+NORETURN(__ia32_sys_exit_group)
NORETURN(__kunit_abort)
NORETURN(__module_put_and_kthread_exit)
NORETURN(__reiserfs_panic)
NORETURN(__stack_chk_fail)
NORETURN(__tdx_hypercall_failed)
NORETURN(__ubsan_handle_builtin_unreachable)
+NORETURN(__x64_sys_exit)
+NORETURN(__x64_sys_exit_group)
NORETURN(arch_cpu_idle_dead)
NORETURN(bch2_trans_in_restart_error)
NORETURN(bch2_trans_restart_error)
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 91b1950f5bd8..097a69db82a0 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -84,6 +84,14 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
entry->new_len);
}
+ orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
+ if (!orig_reloc) {
+ WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
+ return -1;
+ }
+
+ reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
+
if (entry->feature) {
unsigned short feature;
@@ -94,14 +102,6 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
arch_handle_alternative(feature, alt);
}
- orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
- if (!orig_reloc) {
- WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
- return -1;
- }
-
- reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
-
if (!entry->group || alt->new_len) {
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
if (!new_reloc) {
diff --git a/tools/perf/Build b/tools/perf/Build
index b0cb7ad8e6ac..1d4957957d75 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -1,4 +1,4 @@
-perf-y += builtin-bench.o
+perf-bench-y += builtin-bench.o
perf-y += builtin-annotate.o
perf-y += builtin-config.o
perf-y += builtin-diff.o
@@ -35,8 +35,8 @@ endif
perf-$(CONFIG_LIBELF) += builtin-probe.o
-perf-y += bench/
-perf-y += tests/
+perf-bench-y += bench/
+perf-test-y += tests/
perf-y += perf.o
@@ -53,10 +53,12 @@ CFLAGS_builtin-trace.o += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_
CFLAGS_builtin-report.o += -DTIPDIR="BUILD_STR($(tipdir_SQ))"
CFLAGS_builtin-report.o += -DDOCDIR="BUILD_STR($(srcdir_SQ)/Documentation)"
-perf-y += util/
+perf-util-y += util/
+perf-util-y += arch/
perf-y += arch/
-perf-y += ui/
-perf-y += scripts/
+perf-test-y += arch/
+perf-ui-y += ui/
+perf-util-y += scripts/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/perf-amd-ibs.txt b/tools/perf/Documentation/perf-amd-ibs.txt
new file mode 100644
index 000000000000..2fd31d9d7b71
--- /dev/null
+++ b/tools/perf/Documentation/perf-amd-ibs.txt
@@ -0,0 +1,189 @@
+perf-amd-ibs(1)
+===============
+
+NAME
+----
+perf-amd-ibs - Support for AMD Instruction-Based Sampling (IBS) with perf tool
+
+SYNOPSIS
+--------
+[verse]
+'perf record' -e ibs_op//
+'perf record' -e ibs_fetch//
+
+DESCRIPTION
+-----------
+
+Instruction-Based Sampling (IBS) provides precise Instruction Pointer (IP)
+profiling support on AMD platforms. IBS has two independent components: IBS
+Op and IBS Fetch. IBS Op sampling provides information about instruction
+execution (micro-op execution to be precise) with details like d-cache
+hit/miss, d-TLB hit/miss, cache miss latency, load/store data source, branch
+behavior etc. IBS Fetch sampling provides information about instruction fetch
+with details like i-cache hit/miss, i-TLB hit/miss, fetch latency etc. IBS is
+per-SMT-thread, i.e. each SMT hardware thread contains standalone IBS units.
+
+Both IBS Op and IBS Fetch are exposed as PMUs by Linux and can be used through
+the Linux perf utility. The following files will be created at boot time if IBS
+is supported by the hardware and kernel.
+
+ /sys/bus/event_source/devices/ibs_op/
+ /sys/bus/event_source/devices/ibs_fetch/
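+
+A quick way to confirm that the PMUs are available (assuming the standard sysfs
+layout) is to list their format directories or to grep perf's event list:
+
+	# ls /sys/bus/event_source/devices/ibs_op/format/
+	# perf list | grep ibs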
+
+IBS Op PMU supports two events: cycles and micro ops. IBS Fetch PMU supports
+one event: fetch ops.
+
+IBS PMUs do not have user/kernel filtering capability, and thus they require
+the CAP_SYS_ADMIN or CAP_PERFMON privilege.
+
+IBS VS. REGULAR CORE PMU
+------------------------
+
+IBS gives samples with a precise IP, i.e. the IP recorded with an IBS sample
+has no skid, whereas the IP recorded by the regular core PMU will have some
+skid (the sample was generated at IP X but perf would record it at IP X+n).
+Hence, the regular core PMU might not help for profiling with instruction-level
+precision. Further, IBS provides additional information about the sample in
+question. On the other hand, the regular core PMU has its own advantages, like
+a plethora of events, counting mode (less interference), up to 6 parallel
+counters, event grouping support, filtering capabilities etc.
+
+Three regular core PMU events are internally forwarded to IBS Op PMU when
+precise_ip attribute is set:
+
+ -e cpu-cycles:p becomes -e ibs_op//
+ -e r076:p becomes -e ibs_op//
+ -e r0C1:p becomes -e ibs_op/cnt_ctl=1/
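+
+For example, once precise_ip triggers this forwarding, the following two
+commands are expected to set up equivalent IBS Op sessions (a sketch; the
+exact behavior depends on CPU and kernel support):
+
+	# perf record -e cpu-cycles:p -c 100000 -a
+	# perf record -e ibs_op// -c 100000 -a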
+
+EXAMPLES
+--------
+
+IBS Op PMU
+~~~~~~~~~~
+
+System-wide profile, cycles event, sampling period: 100000
+
+ # perf record -e ibs_op// -c 100000 -a
+
+Per-cpu profile (cpu10), cycles event, sampling period: 100000
+
+ # perf record -e ibs_op// -c 100000 -C 10
+
+Per-cpu profile (cpu10), cycles event, sampling freq: 1000
+
+ # perf record -e ibs_op// -F 1000 -C 10
+
+System-wide profile, uOps event, sampling period: 100000
+
+ # perf record -e ibs_op/cnt_ctl=1/ -c 100000 -a
+
+Same command, but also capture IBS register raw dump along with perf sample:
+
+ # perf record -e ibs_op/cnt_ctl=1/ -c 100000 -a --raw-samples
+
+System-wide profile, uOps event, sampling period: 100000, L3MissOnly (Zen4 onward)
+
+ # perf record -e ibs_op/cnt_ctl=1,l3missonly=1/ -c 100000 -a
+
+Per-process profile (upstream v6.2 onward), uOps event, sampling period: 100000
+
+ # perf record -e ibs_op/cnt_ctl=1/ -c 100000 -p 1234
+
+Per-workload profile (upstream v6.2 onward), uOps event, sampling period: 100000
+
+ # perf record -e ibs_op/cnt_ctl=1/ -c 100000 -- ls
+
+To analyse recorded profile in aggregate mode
+
+ # perf report
+ /* Select a line and press 'a' to drill down at instruction level. */
+
+To go over each sample
+
+ # perf script
+
+Raw dump of IBS registers when profiled with --raw-samples
+
+ # perf report -D
+ /* Look for PERF_RECORD_SAMPLE */
+
+ Example register raw dump:
+
+ ibs_op_ctl: 000002c30006186a MaxCnt 100000 L3MissOnly 0 En 1
+ Val 1 CntCtl 0=cycles CurCnt 707
+ IbsOpRip: ffffffff8204aea7
+ ibs_op_data: 0000010002550001 CompToRetCtr 1 TagToRetCtr 597
+ BrnRet 0 RipInvalid 0 BrnFuse 0 Microcode 1
+ ibs_op_data2: 0000000000000013 RmtNode 1 DataSrc 3=DRAM
+ ibs_op_data3: 0000000031960092 LdOp 0 StOp 1 DcL1TlbMiss 0
+ DcL2TlbMiss 0 DcL1TlbHit2M 1 DcL1TlbHit1G 0 DcL2TlbHit2M 0
+ DcMiss 1 DcMisAcc 0 DcWcMemAcc 0 DcUcMemAcc 0 DcLockedOp 0
+ DcMissNoMabAlloc 0 DcLinAddrValid 1 DcPhyAddrValid 1
+ DcL2TlbHit1G 0 L2Miss 1 SwPf 0 OpMemWidth 32 bytes
+ OpDcMissOpenMemReqs 12 DcMissLat 0 TlbRefillLat 0
+ IbsDCLinAd: ff110008a5398920
+ IbsDCPhysAd: 00000008a5398920
+
+IBS applied in a real-world use case
+
+	A ~90% regression was observed in tbench with a specific scheduler hint,
+	which was counter-intuitive. IBS profiles of the good and bad runs,
+	captured using perf, helped in identifying the exact cause of the problem:
+
+ https://lore.kernel.org/r/20220921063638.2489-1-kprateek.nayak@amd.com
+
+IBS Fetch PMU
+~~~~~~~~~~~~~
+
+Similar commands can be used with Fetch PMU as well.
+
+System-wide profile, fetch ops event, sampling period: 100000
+
+ # perf record -e ibs_fetch// -c 100000 -a
+
+System-wide profile, fetch ops event, sampling period: 100000, Random enable
+
+ # perf record -e ibs_fetch/rand_en=1/ -c 100000 -a
+
+	Random enable adds a small degree of variability to the sample period.
+	This helps in cases like long-running loops where the PMU keeps tagging
+	the same instruction over and over because of the fixed sample period.
+
+etc.
+
+PERF MEM AND PERF C2C
+---------------------
+
+perf mem is a memory access profiling tool and perf c2c is a shared data
+cacheline analysis tool. Both of them internally use the IBS Op PMU on AMD.
+Below is a simple example of the perf mem tool.
+
+ # perf mem record -c 100000 -- make
+ # perf mem report
+
+A normal perf mem report output will provide a detailed memory access profile.
+However, it can also be aggregated based on output fields. For example:
+
+ # perf mem report -F mem,sample,snoop
+ Samples: 3M of event 'ibs_op//', Event count (approx.): 23524876
+ Memory access Samples Snoop
+ N/A 1903343 N/A
+ L1 hit 1056754 N/A
+ L2 hit 75231 N/A
+ L3 hit 9496 HitM
+ L3 hit 2270 N/A
+ RAM hit 8710 N/A
+ Remote node, same socket RAM hit 3241 N/A
+ Remote core, same node Any cache hit 1572 HitM
+ Remote core, same node Any cache hit 514 N/A
+ Remote node, same socket Any cache hit 1216 HitM
+ Remote node, same socket Any cache hit 350 N/A
+ Uncached hit 18 N/A
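+
+A minimal perf c2c session follows the same pattern (a sketch; the analysis is
+only meaningful with enough shared-cacheline traffic):
+
+	# perf c2c record -a -- sleep 10
+	# perf c2c report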
+
+Please refer to their man pages for more details.
+
+SEE ALSO
+--------
+
+linkperf:perf-record[1], linkperf:perf-script[1], linkperf:perf-report[1],
+linkperf:perf-mem[1], linkperf:perf-c2c[1]
diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
index 109ace1d5e90..21e607669d78 100644
--- a/tools/perf/Documentation/perf-kwork.txt
+++ b/tools/perf/Documentation/perf-kwork.txt
@@ -1,4 +1,4 @@
-perf-kowrk(1)
+perf-kwork(1)
=============
NAME
@@ -35,7 +35,7 @@ There are several variants of 'perf kwork':
perf kwork top
perf kwork top -b
- By default it shows the individual work events such as irq, workqeueu,
+ By default it shows the individual work events such as irq, workqueue,
including the run time and delay (time between raise and actually entry):
Runtime start Runtime end Cpu Kwork name Runtime Delaytime
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index f5938d616d75..57a940399de0 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -111,11 +111,11 @@ INFO OPTIONS
-t::
--threads::
- dump thread list in perf.data
+ dump only the thread list in perf.data
-m::
--map::
- dump map of lock instances (address:name table)
+ dump only the map of lock instances (address:name table)
CONTENTION OPTIONS
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 19862572e3f2..47456b212e99 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -21,7 +21,7 @@ and stores are sampled. Use the -t option to limit to loads or stores.
Note that on Intel systems the memory latency reported is the use-latency,
not the pure load (or store latency). Use latency includes any pipeline
-queueing delays in addition to the memory subsystem latency.
+queuing delays in addition to the memory subsystem latency.
On Arm64 this uses SPE to sample load and store operations, therefore hardware
and kernel support is required. See linkperf:perf-arm-spe[1] for a setup guide.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 6015fdd08fb6..d6532ed97c02 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -200,7 +200,7 @@ OPTIONS
ip, id, tid, pid, cpu, time, addr, period, txn, weight, phys_addr,
code_pgsz, data_pgsz, weight1, weight2, weight3, ins_lat, retire_lat,
p_stage_cyc, mem_op, mem_lvl, mem_snoop, mem_remote, mem_lock,
- mem_dtlb, mem_blk, mem_hops
+ mem_dtlb, mem_blk, mem_hops, uid, gid
The <operator> can be one of:
==, !=, >, >=, <, <=, &
@@ -311,7 +311,7 @@ OPTIONS
User can change the size by passing the size after comma like
"--call-graph dwarf,4096".
- When "fp" recording is used, perf tries to save stack enties
+ When "fp" recording is used, perf tries to save stack entries
up to the number specified in sysctl.kernel.perf_event_max_stack
by default. User can change the number by passing it after comma
like "--call-graph fp,32".
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index a216d2991b19..84d49f9241b1 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -64,8 +64,8 @@ There are several variants of 'perf sched':
By default it shows the individual schedule events, including the wait
time (time between sched-out and next sched-in events for the task), the
- task scheduling delay (time between wakeup and actually running) and run
- time for the task:
+ task scheduling delay (time between runnable and actually running) and
+ run time for the task:
time cpu task name wait time sch delay run time
[tid/pid] (msec) (msec) (msec)
@@ -130,6 +130,16 @@ OPTIONS for 'perf sched map'
--color-pids::
Highlight the given pids.
+--task-name <task>::
+ Map output only for the given task name(s). Separate the
+ task names with a comma (without whitespace). The sched-out
+ time is printed and is represented by '*-' for the given
+ task name(s).
+ ('-' indicates other tasks while '.' is idle).
+
+--fuzzy-name::
+ Given task name(s) can be partially matched (fuzzy matching).
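+
+	For example, both options can be combined to show the map only for
+	partially matched task names (a sketch; any task name works):
+
+	  # perf sched record -- sleep 5
+	  # perf sched map --task-name kworker --fuzzy-name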
+
OPTIONS for 'perf sched timehist'
---------------------------------
-k::
@@ -202,6 +212,13 @@ OPTIONS for 'perf sched timehist'
--state::
Show task state when it switched out.
+OPTIONS for 'perf sched replay'
+------------------------------
+
+-r::
+--repeat <n>::
+	Repeat the workload n times (0: infinite). Default is 10.
+
SEE ALSO
--------
linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index a754875fa5bb..667e5102075e 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -43,6 +43,10 @@ Default is to monitor all CPUS.
encoding with the layout of the event control registers as described
by entries in /sys/bus/event_source/devices/cpu/format/*.
+--filter=<filter>::
+ Event filter. This option should follow an event selector (-e). For
+ syntax see linkperf:perf-record[1].
+
-E <entries>::
--entries=<entries>::
Display this many functions.
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 09f516f3fdfb..cbcc2e4d557e 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -82,7 +82,8 @@ linkperf:perf-stat[1], linkperf:perf-top[1],
linkperf:perf-record[1], linkperf:perf-report[1],
linkperf:perf-list[1]
-linkperf:perf-annotate[1],linkperf:perf-archive[1],linkperf:perf-arm-spe[1],
+linkperf:perf-amd-ibs[1], linkperf:perf-annotate[1],
+linkperf:perf-archive[1], linkperf:perf-arm-spe[1],
linkperf:perf-bench[1], linkperf:perf-buildid-cache[1],
linkperf:perf-buildid-list[1], linkperf:perf-c2c[1],
linkperf:perf-config[1], linkperf:perf-data[1], linkperf:perf-diff[1],
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 7f1e016a9253..a4829b6532d8 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -182,15 +182,19 @@ endif
FEATURE_CHECK_CFLAGS-libzstd := $(LIBZSTD_CFLAGS)
FEATURE_CHECK_LDFLAGS-libzstd := $(LIBZSTD_LDFLAGS)
-# for linking with debug library, run like:
-# make DEBUG=1 LIBTRACEEVENT_DIR=/opt/libtraceevent/
-TRACEEVENTLIBS := -ltraceevent
-ifdef LIBTRACEEVENT_DIR
- LIBTRACEEVENT_CFLAGS := -I$(LIBTRACEEVENT_DIR)/include
- LIBTRACEEVENT_LDFLAGS := -L$(LIBTRACEEVENT_DIR)/lib
+ifneq ($(NO_LIBTRACEEVENT),1)
+ ifeq ($(call get-executable,$(PKG_CONFIG)),)
+ $(error Error: $(PKG_CONFIG) needed by libtraceevent is missing on this system, please install it)
+ endif
endif
-FEATURE_CHECK_CFLAGS-libtraceevent := $(LIBTRACEEVENT_CFLAGS)
-FEATURE_CHECK_LDFLAGS-libtraceevent := $(LIBTRACEEVENT_LDFLAGS) $(TRACEEVENTLIBS)
+
+# for linking with debug library, run like:
+# make DEBUG=1 PKG_CONFIG_PATH=/opt/libtraceevent/(lib|lib64)/pkgconfig
+FEATURE_CHECK_CFLAGS-libtraceevent := $(shell $(PKG_CONFIG) --cflags libtraceevent 2>/dev/null)
+FEATURE_CHECK_LDFLAGS-libtraceevent := $(shell $(PKG_CONFIG) --libs libtraceevent 2>/dev/null)
+
+FEATURE_CHECK_CFLAGS-libtracefs := $(shell $(PKG_CONFIG) --cflags libtracefs 2>/dev/null)
+FEATURE_CHECK_LDFLAGS-libtracefs := $(shell $(PKG_CONFIG) --libs libtracefs 2>/dev/null)
FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
# include ARCH specific config
@@ -210,12 +214,6 @@ ifeq ($(call get-executable,$(BISON)),)
$(error Error: $(BISON) is missing on this system, please install it)
endif
-ifneq ($(NO_LIBTRACEEVENT),1)
- ifeq ($(call get-executable,$(PKG_CONFIG)),)
- dummy := $(error Error: $(PKG_CONFIG) needed by libtraceevent is missing on this system, please install it)
- endif
-endif
-
ifneq ($(OUTPUT),)
ifeq ($(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \>\= 371), 1)
BISON_FILE_PREFIX_MAP := --file-prefix-map=$(OUTPUT)=
@@ -910,6 +908,11 @@ else
endif
CFLAGS += -DHAVE_LIBPYTHON_SUPPORT
$(call detected,CONFIG_LIBPYTHON)
+ ifeq ($(filter -fPIC,$(CFLAGS)),)
+ # Building a shared library requires position independent code.
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+ endif
endif
endif
endif
@@ -1178,10 +1181,10 @@ endif
ifneq ($(NO_LIBTRACEEVENT),1)
$(call feature_check,libtraceevent)
ifeq ($(feature-libtraceevent), 1)
- CFLAGS += -DHAVE_LIBTRACEEVENT $(LIBTRACEEVENT_CFLAGS)
- LDFLAGS += $(LIBTRACEEVENT_LDFLAGS)
- EXTLIBS += ${TRACEEVENTLIBS}
- LIBTRACEEVENT_VERSION := $(shell PKG_CONFIG_PATH=$(LIBTRACEEVENT_DIR) $(PKG_CONFIG) --modversion libtraceevent)
+ CFLAGS += -DHAVE_LIBTRACEEVENT
+ LDFLAGS += $(shell $(PKG_CONFIG) --libs-only-L libtraceevent)
+ EXTLIBS += $(shell $(PKG_CONFIG) --libs-only-l libtraceevent)
+ LIBTRACEEVENT_VERSION := $(shell $(PKG_CONFIG) --modversion libtraceevent).0.0
LIBTRACEEVENT_VERSION_1 := $(word 1, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
LIBTRACEEVENT_VERSION_2 := $(word 2, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
LIBTRACEEVENT_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
@@ -1194,8 +1197,10 @@ ifneq ($(NO_LIBTRACEEVENT),1)
$(call feature_check,libtracefs)
ifeq ($(feature-libtracefs), 1)
- EXTLIBS += -ltracefs
- LIBTRACEFS_VERSION := $(shell $(PKG_CONFIG) --modversion libtracefs)
+ CFLAGS += $(shell $(PKG_CONFIG) --cflags libtracefs)
+ LDFLAGS += $(shell $(PKG_CONFIG) --libs-only-L libtracefs)
+ EXTLIBS += $(shell $(PKG_CONFIG) --libs-only-l libtracefs)
+ LIBTRACEFS_VERSION := $(shell $(PKG_CONFIG) --modversion libtracefs).0.0
LIBTRACEFS_VERSION_1 := $(word 1, $(subst ., ,$(LIBTRACEFS_VERSION)))
LIBTRACEFS_VERSION_2 := $(word 2, $(subst ., ,$(LIBTRACEFS_VERSION)))
LIBTRACEFS_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEFS_VERSION)))
@@ -1315,7 +1320,6 @@ ifeq ($(VF),1)
$(call print_var,LIBUNWIND_DIR)
$(call print_var,LIBDW_DIR)
$(call print_var,JDIR)
- $(call print_var,LIBTRACEEVENT_DIR)
ifeq ($(dwarf-post-unwind),1)
$(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text)) $(info $(MSG))
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index e6d56b555369..175e4c7898f0 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -381,14 +381,6 @@ python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT
# Use the detected configuration
-include $(OUTPUT).config-detected
-ifeq ($(CONFIG_LIBTRACEEVENT),y)
- PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
-else
- PYTHON_EXT_SRCS := $(shell grep -v ^\#\\\|util/trace-event.c\\\|util/trace-event-parse.c util/python-ext-sources)
-endif
-
-PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBAPI)
-
SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
PROGRAMS += $(OUTPUT)perf
@@ -426,10 +418,27 @@ endif
export PERL_PATH
+LIBPERF_BENCH_IN := $(OUTPUT)perf-bench-in.o
+LIBPERF_BENCH := $(OUTPUT)libperf-bench.a
+
+LIBPERF_TEST_IN := $(OUTPUT)perf-test-in.o
+LIBPERF_TEST := $(OUTPUT)libperf-test.a
+
+LIBPERF_UI_IN := $(OUTPUT)perf-ui-in.o
+LIBPERF_UI := $(OUTPUT)libperf-ui.a
+
+LIBPERF_UTIL_IN := $(OUTPUT)perf-util-in.o
+LIBPERF_UTIL := $(OUTPUT)libperf-util.a
+
+LIBPMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
+LIBPMU_EVENTS := $(OUTPUT)libpmu-events.a
+
PERFLIBS = $(LIBAPI) $(LIBPERF) $(LIBSUBCMD) $(LIBSYMBOL)
ifdef LIBBPF_STATIC
PERFLIBS += $(LIBBPF)
endif
+PERFLIBS += $(LIBPERF_BENCH) $(LIBPERF_TEST) $(LIBPERF_UI) $(LIBPERF_UTIL)
+PERFLIBS += $(LIBPMU_EVENTS)
# We choose to avoid "if .. else if .. else .. endif endif"
# because maintaining the nesting to match is a pain. If
@@ -699,9 +708,9 @@ all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
# Create python binding output directory if not already present
$(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
-$(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX): $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBPERF) $(LIBSUBCMD)
+$(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX): util/python.c util/setup.py $(PERFLIBS)
$(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
- CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS)' \
+ CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBS)' \
$(PYTHON_WORD) util/setup.py \
--quiet build_ext; \
cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
@@ -718,8 +727,6 @@ strip: $(PROGRAMS) $(OUTPUT)perf
$(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
PERF_IN := $(OUTPUT)perf-in.o
-
-PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
export NO_JEVENTS
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
@@ -727,12 +734,39 @@ build := -f $(srctree)/tools/build/Makefile.build dir=. obj
$(PERF_IN): prepare FORCE
$(Q)$(MAKE) $(build)=perf
-$(PMU_EVENTS_IN): FORCE prepare
+$(LIBPMU_EVENTS_IN): FORCE prepare
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
-$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN)
+$(LIBPMU_EVENTS): $(LIBPMU_EVENTS_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $<
+
+$(LIBPERF_BENCH_IN): FORCE prepare
+ $(Q)$(MAKE) $(build)=perf-bench
+
+$(LIBPERF_BENCH): $(LIBPERF_BENCH_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $<
+
+$(LIBPERF_TEST_IN): FORCE prepare
+ $(Q)$(MAKE) $(build)=perf-test
+
+$(LIBPERF_TEST): $(LIBPERF_TEST_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $<
+
+$(LIBPERF_UI_IN): FORCE prepare
+ $(Q)$(MAKE) $(build)=perf-ui
+
+$(LIBPERF_UI): $(LIBPERF_UI_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $<
+
+$(LIBPERF_UTIL_IN): FORCE prepare
+ $(Q)$(MAKE) $(build)=perf-util
+
+$(LIBPERF_UTIL): $(LIBPERF_UTIL_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $<
+
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) \
- $(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
+ $(PERF_IN) $(LIBS) -o $@
$(GTK_IN): FORCE prepare
$(Q)$(MAKE) $(build)=gtk
@@ -892,7 +926,7 @@ $(LIBAPI)-clean:
$(LIBBPF): FORCE | $(LIBBPF_OUTPUT)
$(Q)$(MAKE) -C $(LIBBPF_DIR) FEATURES_DUMP=$(FEATURE_DUMP_EXPORT) \
O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= subdir= \
- $@ install_headers
+ EXTRA_CFLAGS="-fPIC" $@ install_headers
$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
@@ -1094,7 +1128,7 @@ install-python_ext:
# 'make install-doc' should call 'make -C Documentation install'
$(INSTALL_DOC_TARGETS):
- $(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:-doc=) ASCIIDOC_EXTRA=$(ASCIIDOC_EXTRA)
+ $(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:-doc=) ASCIIDOC_EXTRA=$(ASCIIDOC_EXTRA) subdir=
### Cleaning rules
@@ -1202,12 +1236,19 @@ endif # CONFIG_PERF_BPF_SKEL
bpf-skel-clean:
$(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) $(SKEL_OUT)/vmlinux.h
-clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS)
- $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete -o -name '*.shellcheck_log' -delete
+clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean \
+ arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean \
+ tests-coresight-targets-clean
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive \
+ $(OUTPUT)perf-iostat $(LANG_BINDINGS)
+ $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '*.a' -delete -o \
+ -name '\.*.cmd' -delete -o -name '\.*.d' -delete -o -name '*.shellcheck_log' -delete
$(Q)$(RM) $(OUTPUT).config-detected
- $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)$(LIBJVMTI).so
- $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
+ $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 \
+ perf-read-vdsox32 $(OUTPUT)$(LIBJVMTI).so
+ $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo \
+ $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE \
+ $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
$(OUTPUT)util/intel-pt-decoder/inat-tables.c \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
$(OUTPUT)pmu-events/pmu-events.c \
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index 688818844c11..f0d96a13445c 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,3 @@
-perf-y += common.o
-perf-y += $(SRCARCH)/
+perf-util-y += common.o
+perf-test-y += $(SRCARCH)/
+perf-util-y += $(SRCARCH)/
diff --git a/tools/perf/arch/arm/Build b/tools/perf/arch/arm/Build
index 36222e64bbf7..317425aa3712 100644
--- a/tools/perf/arch/arm/Build
+++ b/tools/perf/arch/arm/Build
@@ -1,2 +1,2 @@
-perf-y += util/
-perf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-util-y += util/
+perf-test-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
index bc8e97380c82..599efa545727 100644
--- a/tools/perf/arch/arm/tests/Build
+++ b/tools/perf/arch/arm/tests/Build
@@ -1,5 +1,5 @@
-perf-y += regs_load.o
-perf-y += dwarf-unwind.o
-perf-y += vectors-page.o
+perf-test-y += regs_load.o
+perf-test-y += dwarf-unwind.o
+perf-test-y += vectors-page.o
-perf-y += arch-tests.o
+perf-test-y += arch-tests.o
diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build
index 37fc63708966..e6dd7cd79ebd 100644
--- a/tools/perf/arch/arm/util/Build
+++ b/tools/perf/arch/arm/util/Build
@@ -1,8 +1,8 @@
-perf-y += perf_regs.o
+perf-util-y += perf_regs.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
+perf-util-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index 8b7cb68ba1a8..1c9541d01722 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -11,12 +11,15 @@
#include "arm-spe.h"
#include "hisi-ptt.h"
+#include "../../../util/cpumap.h"
#include "../../../util/pmu.h"
#include "../../../util/cs-etm.h"
#include "../../arm64/util/mem-events.h"
-void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused)
+void perf_pmu__arch_init(struct perf_pmu *pmu)
{
+ struct perf_cpu_map *intersect;
+
#ifdef HAVE_AUXTRACE_SUPPORT
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
@@ -27,12 +30,15 @@ void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused)
pmu->selectable = true;
pmu->is_uncore = false;
pmu->perf_event_attr_init_default = arm_spe_pmu_default_config;
- if (!strcmp(pmu->name, "arm_spe_0"))
+ if (strstarts(pmu->name, "arm_spe_"))
pmu->mem_events = perf_mem_events_arm;
} else if (strstarts(pmu->name, HISI_PTT_PMU_NAME)) {
pmu->selectable = true;
#endif
}
-
#endif
+	/* Work around some ARM PMUs failing to correctly set CPU maps for online processors. */
+ intersect = perf_cpu_map__intersect(cpu_map__online(), pmu->cpus);
+ perf_cpu_map__put(pmu->cpus);
+ pmu->cpus = intersect;
}
diff --git a/tools/perf/arch/arm64/Build b/tools/perf/arch/arm64/Build
index a7dd46a5b678..12ebc65ea7a3 100644
--- a/tools/perf/arch/arm64/Build
+++ b/tools/perf/arch/arm64/Build
@@ -1,2 +1,2 @@
-perf-y += util/
-perf-y += tests/
+perf-util-y += util/
+perf-test-y += tests/
diff --git a/tools/perf/arch/arm64/tests/Build b/tools/perf/arch/arm64/tests/Build
index e337c09e7f56..d44c9de92d42 100644
--- a/tools/perf/arch/arm64/tests/Build
+++ b/tools/perf/arch/arm64/tests/Build
@@ -1,5 +1,5 @@
-perf-y += regs_load.o
-perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-test-y += regs_load.o
+perf-test-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-perf-y += arch-tests.o
-perf-y += cpuid-match.o
+perf-test-y += arch-tests.o
+perf-test-y += cpuid-match.o
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 78ef7115be3d..343ef7589a77 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,14 +1,14 @@
-perf-y += header.o
-perf-y += machine.o
-perf-y += perf_regs.o
-perf-y += tsc.o
-perf-y += pmu.o
-perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-y += header.o
+perf-util-y += machine.o
+perf-util-y += perf_regs.o
+perf-util-y += tsc.o
+perf-util-y += pmu.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
+perf-util-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
../../arm/util/cs-etm.o \
arm-spe.o mem-events.o hisi-ptt.o
diff --git a/tools/perf/arch/csky/Build b/tools/perf/arch/csky/Build
index e4e5f33c84d8..e63eabc2c8f4 100644
--- a/tools/perf/arch/csky/Build
+++ b/tools/perf/arch/csky/Build
@@ -1 +1 @@
-perf-y += util/
+perf-util-y += util/
diff --git a/tools/perf/arch/csky/util/Build b/tools/perf/arch/csky/util/Build
index 7d3050134ae0..99d83f41bf43 100644
--- a/tools/perf/arch/csky/util/Build
+++ b/tools/perf/arch/csky/util/Build
@@ -1,4 +1,4 @@
-perf-y += perf_regs.o
+perf-util-y += perf_regs.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/loongarch/Build b/tools/perf/arch/loongarch/Build
index e4e5f33c84d8..e63eabc2c8f4 100644
--- a/tools/perf/arch/loongarch/Build
+++ b/tools/perf/arch/loongarch/Build
@@ -1 +1 @@
-perf-y += util/
+perf-util-y += util/
diff --git a/tools/perf/arch/loongarch/Makefile b/tools/perf/arch/loongarch/Makefile
index 3992a67a87d9..c89d6bb6b184 100644
--- a/tools/perf/arch/loongarch/Makefile
+++ b/tools/perf/arch/loongarch/Makefile
@@ -4,6 +4,7 @@ PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
PERF_HAVE_JITDUMP := 1
+HAVE_KVM_STAT_SUPPORT := 1
#
# Syscall table generation for perf
diff --git a/tools/perf/arch/loongarch/util/Build b/tools/perf/arch/loongarch/util/Build
index d776125a2d06..b6b97de48233 100644
--- a/tools/perf/arch/loongarch/util/Build
+++ b/tools/perf/arch/loongarch/util/Build
@@ -1,5 +1,7 @@
-perf-y += perf_regs.o
+perf-util-y += header.o
+perf-util-y += perf_regs.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
diff --git a/tools/perf/arch/loongarch/util/header.c b/tools/perf/arch/loongarch/util/header.c
new file mode 100644
index 000000000000..d962dff55512
--- /dev/null
+++ b/tools/perf/arch/loongarch/util/header.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of get_cpuid().
+ *
+ * Author: Nikita Shubin <n.shubin@yadro.com>
+ * Bibo Mao <maobibo@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <api/fs/fs.h>
+#include <errno.h>
+#include "util/debug.h"
+#include "util/header.h"
+
+/*
+ * Output example from /proc/cpuinfo
+ * CPU Family : Loongson-64bit
+ * Model Name : Loongson-3C5000
+ * CPU Revision : 0x10
+ * FPU Revision : 0x01
+ */
+#define CPUINFO_MODEL "Model Name"
+#define CPUINFO "/proc/cpuinfo"
+
+static char *_get_field(const char *line)
+{
+ char *line2, *nl;
+
+ line2 = strrchr(line, ' ');
+ if (!line2)
+ return NULL;
+
+ line2++;
+ nl = strrchr(line, '\n');
+ if (!nl)
+ return NULL;
+
+ return strndup(line2, nl - line2);
+}
+
+static char *_get_cpuid(void)
+{
+ unsigned long line_sz;
+ char *line, *model, *cpuid;
+ FILE *file;
+
+ file = fopen(CPUINFO, "r");
+ if (file == NULL)
+ return NULL;
+
+ line = model = cpuid = NULL;
+ while (getline(&line, &line_sz, file) != -1) {
+ if (strncmp(line, CPUINFO_MODEL, strlen(CPUINFO_MODEL)))
+ continue;
+
+ model = _get_field(line);
+ if (!model)
+ goto out_free;
+ break;
+ }
+
+ if (model && (asprintf(&cpuid, "%s", model) < 0))
+ cpuid = NULL;
+
+out_free:
+ fclose(file);
+ free(model);
+ return cpuid;
+}
+
+int get_cpuid(char *buffer, size_t sz)
+{
+ int ret = 0;
+ char *cpuid = _get_cpuid();
+
+ if (!cpuid)
+ return EINVAL;
+
+ if (sz < strlen(cpuid)) {
+ ret = ENOBUFS;
+ goto out_free;
+ }
+
+ scnprintf(buffer, sz, "%s", cpuid);
+
+out_free:
+ free(cpuid);
+ return ret;
+}
+
+char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
+{
+ return _get_cpuid();
+}
diff --git a/tools/perf/arch/loongarch/util/kvm-stat.c b/tools/perf/arch/loongarch/util/kvm-stat.c
new file mode 100644
index 000000000000..a7859a3a9a51
--- /dev/null
+++ b/tools/perf/arch/loongarch/util/kvm-stat.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <memory.h>
+#include "util/kvm-stat.h"
+#include "util/parse-events.h"
+#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
+#include "util/pmus.h"
+
+#define LOONGARCH_EXCEPTION_INT 0
+#define LOONGARCH_EXCEPTION_PIL 1
+#define LOONGARCH_EXCEPTION_PIS 2
+#define LOONGARCH_EXCEPTION_PIF 3
+#define LOONGARCH_EXCEPTION_PME 4
+#define LOONGARCH_EXCEPTION_FPD 15
+#define LOONGARCH_EXCEPTION_SXD 16
+#define LOONGARCH_EXCEPTION_ASXD 17
+#define LOONGARCH_EXCEPTION_GSPR 22
+#define LOONGARCH_EXCEPTION_CPUCFG 100
+#define LOONGARCH_EXCEPTION_CSR 101
+#define LOONGARCH_EXCEPTION_IOCSR 102
+#define LOONGARCH_EXCEPTION_IDLE 103
+#define LOONGARCH_EXCEPTION_OTHERS 104
+#define LOONGARCH_EXCEPTION_HVC 23
+
+#define loongarch_exception_type \
+ {LOONGARCH_EXCEPTION_INT, "Interrupt" }, \
+ {LOONGARCH_EXCEPTION_PIL, "Mem Read" }, \
+ {LOONGARCH_EXCEPTION_PIS, "Mem Store" }, \
+ {LOONGARCH_EXCEPTION_PIF, "Inst Fetch" }, \
+ {LOONGARCH_EXCEPTION_PME, "Mem Modify" }, \
+ {LOONGARCH_EXCEPTION_FPD, "FPU" }, \
+ {LOONGARCH_EXCEPTION_SXD, "LSX" }, \
+ {LOONGARCH_EXCEPTION_ASXD, "LASX" }, \
+ {LOONGARCH_EXCEPTION_GSPR, "Privilege Error" }, \
+ {LOONGARCH_EXCEPTION_HVC, "Hypercall" }, \
+ {LOONGARCH_EXCEPTION_CPUCFG, "CPUCFG" }, \
+ {LOONGARCH_EXCEPTION_CSR, "CSR" }, \
+ {LOONGARCH_EXCEPTION_IOCSR, "IOCSR" }, \
+ {LOONGARCH_EXCEPTION_IDLE, "Idle" }, \
+ {LOONGARCH_EXCEPTION_OTHERS, "Others" }
+
+define_exit_reasons_table(loongarch_exit_reasons, loongarch_exception_type);
+
+const char *vcpu_id_str = "vcpu_id";
+const char *kvm_exit_reason = "reason";
+const char *kvm_entry_trace = "kvm:kvm_enter";
+const char *kvm_reenter_trace = "kvm:kvm_reenter";
+const char *kvm_exit_trace = "kvm:kvm_exit";
+const char *kvm_events_tp[] = {
+ "kvm:kvm_enter",
+ "kvm:kvm_reenter",
+ "kvm:kvm_exit",
+ "kvm:kvm_exit_gspr",
+ NULL,
+};
+
+static bool event_begin(struct evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ return exit_event_begin(evsel, sample, key);
+}
+
+static bool event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ /*
+	 * LoongArch KVM is different from other architectures.
+	 *
+	 * There is a kvm:kvm_reenter or kvm:kvm_enter event adjacent to the
+	 * kvm:kvm_exit event.
+	 * kvm:kvm_enter means returning to the VMM and then to the guest;
+	 * kvm:kvm_reenter means returning to the guest immediately.
+ */
+ return evsel__name_is(evsel, kvm_entry_trace) || evsel__name_is(evsel, kvm_reenter_trace);
+}
+
+static void event_gspr_get_key(struct evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ unsigned int insn;
+
+ key->key = LOONGARCH_EXCEPTION_OTHERS;
+ insn = evsel__intval(evsel, sample, "inst_word");
+
+ switch (insn >> 24) {
+ case 0:
+ /* CPUCFG inst trap */
+ if ((insn >> 10) == 0x1b)
+ key->key = LOONGARCH_EXCEPTION_CPUCFG;
+ break;
+ case 4:
+ /* CSR inst trap */
+ key->key = LOONGARCH_EXCEPTION_CSR;
+ break;
+ case 6:
+ /* IOCSR inst trap */
+ if ((insn >> 15) == 0xc90)
+ key->key = LOONGARCH_EXCEPTION_IOCSR;
+ else if ((insn >> 15) == 0xc91)
+ /* Idle inst trap */
+ key->key = LOONGARCH_EXCEPTION_IDLE;
+ break;
+ default:
+ key->key = LOONGARCH_EXCEPTION_OTHERS;
+ break;
+ }
+}
+
+static struct child_event_ops child_events[] = {
+ { .name = "kvm:kvm_exit_gspr", .get_key = event_gspr_get_key },
+ { NULL, NULL },
+};
+
+static struct kvm_events_ops exit_events = {
+ .is_begin_event = event_begin,
+ .is_end_event = event_end,
+ .child_ops = child_events,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+ { .name = "vmexit", .ops = &exit_events, },
+ { NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+ NULL,
+};
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+{
+ kvm->exit_reasons_isa = "loongarch64";
+ kvm->exit_reasons = loongarch_exit_reasons;
+ return 0;
+}
diff --git a/tools/perf/arch/mips/Build b/tools/perf/arch/mips/Build
index e4e5f33c84d8..e63eabc2c8f4 100644
--- a/tools/perf/arch/mips/Build
+++ b/tools/perf/arch/mips/Build
@@ -1 +1 @@
-perf-y += util/
+perf-util-y += util/
diff --git a/tools/perf/arch/mips/util/Build b/tools/perf/arch/mips/util/Build
index 51c8900a9a10..e4644f1e68a0 100644
--- a/tools/perf/arch/mips/util/Build
+++ b/tools/perf/arch/mips/util/Build
@@ -1,3 +1,3 @@
-perf-y += perf_regs.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-util-y += perf_regs.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
diff --git a/tools/perf/arch/powerpc/Build b/tools/perf/arch/powerpc/Build
index a7dd46a5b678..12ebc65ea7a3 100644
--- a/tools/perf/arch/powerpc/Build
+++ b/tools/perf/arch/powerpc/Build
@@ -1,2 +1,2 @@
-perf-y += util/
-perf-y += tests/
+perf-util-y += util/
+perf-test-y += tests/
diff --git a/tools/perf/arch/powerpc/tests/Build b/tools/perf/arch/powerpc/tests/Build
index 3526ab0af9f9..275026950645 100644
--- a/tools/perf/arch/powerpc/tests/Build
+++ b/tools/perf/arch/powerpc/tests/Build
@@ -1,4 +1,4 @@
-perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-test-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-test-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-perf-y += arch-tests.o
+perf-test-y += arch-tests.o
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 1d323f3a3322..6c588ecdf3bd 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,14 +1,14 @@
-perf-y += header.o
-perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
-perf-y += perf_regs.o
-perf-y += mem-events.o
-perf-y += pmu.o
-perf-y += sym-handling.o
-perf-y += evsel.o
-perf-y += event.o
+perf-util-y += header.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+perf-util-y += perf_regs.o
+perf-util-y += mem-events.o
+perf-util-y += pmu.o
+perf-util-y += sym-handling.o
+perf-util-y += evsel.o
+perf-util-y += event.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_DWARF) += skip-callchain-idx.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_DWARF) += skip-callchain-idx.o
-perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 5f3edb3004d8..356786432fd3 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -159,9 +159,9 @@ static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
Dwarf_Addr start = pc;
Dwarf_Addr end = pc;
bool signalp;
- const char *exec_file = dso->long_name;
+ const char *exec_file = dso__long_name(dso);
- dwfl = dso->dwfl;
+ dwfl = RC_CHK_ACCESS(dso)->dwfl;
if (!dwfl) {
dwfl = dwfl_begin(&offline_callbacks);
@@ -183,7 +183,7 @@ static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
dwfl_end(dwfl);
goto out;
}
- dso->dwfl = dwfl;
+ RC_CHK_ACCESS(dso)->dwfl = dwfl;
}
mod = dwfl_addrmodule(dwfl, pc);
@@ -267,7 +267,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
rc = check_return_addr(dso, map__start(al.map), ip);
pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n",
- dso->long_name, al.sym->name, ip, rc);
+ dso__long_name(dso), al.sym->name, ip, rc);
if (rc == 0) {
/*
diff --git a/tools/perf/arch/riscv/Build b/tools/perf/arch/riscv/Build
index e4e5f33c84d8..e63eabc2c8f4 100644
--- a/tools/perf/arch/riscv/Build
+++ b/tools/perf/arch/riscv/Build
@@ -1 +1 @@
-perf-y += util/
+perf-util-y += util/
diff --git a/tools/perf/arch/riscv/Makefile b/tools/perf/arch/riscv/Makefile
index a8d25d005207..90c3c476a242 100644
--- a/tools/perf/arch/riscv/Makefile
+++ b/tools/perf/arch/riscv/Makefile
@@ -3,3 +3,4 @@ PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
PERF_HAVE_JITDUMP := 1
+HAVE_KVM_STAT_SUPPORT := 1
diff --git a/tools/perf/arch/riscv/util/Build b/tools/perf/arch/riscv/util/Build
index 603dbb5ae4dc..f865cb0489ec 100644
--- a/tools/perf/arch/riscv/util/Build
+++ b/tools/perf/arch/riscv/util/Build
@@ -1,5 +1,6 @@
-perf-y += perf_regs.o
-perf-y += header.o
+perf-util-y += perf_regs.o
+perf-util-y += header.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/riscv/util/kvm-stat.c b/tools/perf/arch/riscv/util/kvm-stat.c
new file mode 100644
index 000000000000..491aef449d1a
--- /dev/null
+++ b/tools/perf/arch/riscv/util/kvm-stat.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arch specific functions for perf kvm stat.
+ *
+ * Copyright 2024 Beijing ESWIN Computing Technology Co., Ltd.
+ *
+ */
+#include <errno.h>
+#include <memory.h>
+#include "../../../util/evsel.h"
+#include "../../../util/kvm-stat.h"
+#include "riscv_exception_types.h"
+#include "debug.h"
+
+define_exit_reasons_table(riscv_exit_reasons, kvm_riscv_exception_class);
+
+const char *vcpu_id_str = "id";
+const char *kvm_exit_reason = "scause";
+const char *kvm_entry_trace = "kvm:kvm_entry";
+const char *kvm_exit_trace = "kvm:kvm_exit";
+
+const char *kvm_events_tp[] = {
+ "kvm:kvm_entry",
+ "kvm:kvm_exit",
+ NULL,
+};
+
+static void event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->info = 0;
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason);
+ key->exit_reasons = riscv_exit_reasons;
+}
+
+static bool event_begin(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return evsel__name_is(evsel, kvm_entry_trace);
+}
+
+static bool event_end(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (evsel__name_is(evsel, kvm_exit_trace)) {
+ event_get_key(evsel, sample, key);
+ return true;
+ }
+ return false;
+}
+
+static struct kvm_events_ops exit_events = {
+ .is_begin_event = event_begin,
+ .is_end_event = event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+ {
+ .name = "vmexit",
+ .ops = &exit_events,
+ },
+ { NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+ NULL,
+};
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+{
+ kvm->exit_reasons_isa = "riscv64";
+ return 0;
+}
diff --git a/tools/perf/arch/riscv/util/riscv_exception_types.h b/tools/perf/arch/riscv/util/riscv_exception_types.h
new file mode 100644
index 000000000000..c49b8fa5e847
--- /dev/null
+++ b/tools/perf/arch/riscv/util/riscv_exception_types.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef ARCH_PERF_RISCV_EXCEPTION_TYPES_H
+#define ARCH_PERF_RISCV_EXCEPTION_TYPES_H
+
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_INST_ILLEGAL 2
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_MISALIGNED 4
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_MISALIGNED 6
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_HYPERVISOR_SYSCALL 9
+#define EXC_SUPERVISOR_SYSCALL 10
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+#define EXC_INST_GUEST_PAGE_FAULT 20
+#define EXC_LOAD_GUEST_PAGE_FAULT 21
+#define EXC_VIRTUAL_INST_FAULT 22
+#define EXC_STORE_GUEST_PAGE_FAULT 23
+
+#define EXC(x) {EXC_##x, #x }
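+/* e.g. EXC(SYSCALL) expands to {EXC_SYSCALL, "SYSCALL" } */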
+
+#define kvm_riscv_exception_class \
+ EXC(INST_MISALIGNED), EXC(INST_ACCESS), EXC(INST_ILLEGAL), \
+ EXC(BREAKPOINT), EXC(LOAD_MISALIGNED), EXC(LOAD_ACCESS), \
+ EXC(STORE_MISALIGNED), EXC(STORE_ACCESS), EXC(SYSCALL), \
+ EXC(HYPERVISOR_SYSCALL), EXC(SUPERVISOR_SYSCALL), \
+ EXC(INST_PAGE_FAULT), EXC(LOAD_PAGE_FAULT), EXC(STORE_PAGE_FAULT), \
+ EXC(INST_GUEST_PAGE_FAULT), EXC(LOAD_GUEST_PAGE_FAULT), \
+ EXC(VIRTUAL_INST_FAULT), EXC(STORE_GUEST_PAGE_FAULT)
+
+#endif /* ARCH_PERF_RISCV_EXCEPTION_TYPES_H */
diff --git a/tools/perf/arch/s390/Build b/tools/perf/arch/s390/Build
index e4e5f33c84d8..e63eabc2c8f4 100644
--- a/tools/perf/arch/s390/Build
+++ b/tools/perf/arch/s390/Build
@@ -1 +1 @@
-perf-y += util/
+perf-util-y += util/
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index fa66f15a14ec..1ac830030ff3 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -1,11 +1,11 @@
-perf-y += header.o
-perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
-perf-y += perf_regs.o
+perf-util-y += header.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+perf-util-y += perf_regs.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-y += machine.o
-perf-y += pmu.o
+perf-util-y += machine.o
+perf-util-y += pmu.o
-perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-util-$(CONFIG_AUXTRACE) += auxtrace.o
diff --git a/tools/perf/arch/sh/Build b/tools/perf/arch/sh/Build
index e4e5f33c84d8..e63eabc2c8f4 100644
--- a/tools/perf/arch/sh/Build
+++ b/tools/perf/arch/sh/Build
@@ -1 +1 @@
-perf-y += util/
+perf-util-y += util/
diff --git a/tools/perf/arch/sh/util/Build b/tools/perf/arch/sh/util/Build
index e813e618954b..32f44fc4ab98 100644
--- a/tools/perf/arch/sh/util/Build
+++ b/tools/perf/arch/sh/util/Build
@@ -1 +1 @@
-perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/sparc/Build b/tools/perf/arch/sparc/Build
index e4e5f33c84d8..e63eabc2c8f4 100644
--- a/tools/perf/arch/sparc/Build
+++ b/tools/perf/arch/sparc/Build
@@ -1 +1 @@
-perf-y += util/
+perf-util-y += util/
diff --git a/tools/perf/arch/sparc/util/Build b/tools/perf/arch/sparc/util/Build
index e813e618954b..32f44fc4ab98 100644
--- a/tools/perf/arch/sparc/util/Build
+++ b/tools/perf/arch/sparc/util/Build
@@ -1 +1 @@
-perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index ed37013b4289..87d057491343 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -1,5 +1,5 @@
-perf-y += util/
-perf-y += tests/
+perf-util-y += util/
+perf-test-y += tests/
ifdef SHELLCHECK
SHELL_TESTS := entry/syscalls/syscalltbl.sh
@@ -13,4 +13,4 @@ $(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
$(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
-perf-y += $(TEST_LOGS)
+perf-test-y += $(TEST_LOGS)
diff --git a/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
index 59d7914ed6bb..2b71f99933a5 100755
--- a/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
+++ b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
@@ -24,7 +24,9 @@ sorted_table=$(mktemp /tmp/syscalltbl.XXXXXX)
grep '^[0-9]' "$in" | sort -n > $sorted_table
max_nr=0
-while read nr _abi name entry _compat; do
+# the params are: nr abi name entry compat
+# use _ for intentionally unused variables according to SC2034
+while read nr _ name _ _; do
if [ $nr -ge 512 ] ; then # discard compat sycalls
break
fi
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index c1e3b7d39554..3227053f3355 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -1,15 +1,15 @@
-perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-test-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-test-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-perf-y += arch-tests.o
-perf-y += sample-parsing.o
-perf-y += hybrid.o
-perf-$(CONFIG_AUXTRACE) += intel-pt-test.o
+perf-test-y += arch-tests.o
+perf-test-y += sample-parsing.o
+perf-test-y += hybrid.o
+perf-test-$(CONFIG_AUXTRACE) += intel-pt-test.o
ifeq ($(CONFIG_EXTRA_TESTS),y)
-perf-$(CONFIG_AUXTRACE) += insn-x86.o
+perf-test-$(CONFIG_AUXTRACE) += insn-x86.o
endif
-perf-$(CONFIG_X86_64) += bp-modify.o
-perf-y += amd-ibs-via-core-pmu.o
+perf-test-$(CONFIG_X86_64) += bp-modify.o
+perf-test-y += amd-ibs-via-core-pmu.o
ifdef SHELLCHECK
SHELL_TESTS := gen-insn-x86-dat.sh
@@ -23,4 +23,4 @@ $(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
$(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
-perf-y += $(TEST_LOGS)
+perf-test-y += $(TEST_LOGS)
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-32.c b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
index ba429cadb18f..ce9645edaf68 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-32.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
@@ -3107,6 +3107,122 @@
"62 f5 7c 08 2e ca \tvucomish %xmm2,%xmm1",},
{{0x62, 0xf5, 0x7c, 0x08, 0x2e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
"62 f5 7c 08 2e 8c c8 78 56 34 12 \tvucomish 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0xf3, 0x0f, 0x38, 0xdc, 0xd1, }, 5, 0, "", "",
+"f3 0f 38 dc d1 \tloadiwkey %xmm1,%xmm2",},
+{{0xf3, 0x0f, 0x38, 0xfa, 0xd0, }, 5, 0, "", "",
+"f3 0f 38 fa d0 \tencodekey128 %eax,%edx",},
+{{0xf3, 0x0f, 0x38, 0xfb, 0xd0, }, 5, 0, "", "",
+"f3 0f 38 fb d0 \tencodekey256 %eax,%edx",},
+{{0xf3, 0x0f, 0x38, 0xdc, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 dc 5a 77 \taesenc128kl 0x77(%edx),%xmm3",},
+{{0xf3, 0x0f, 0x38, 0xde, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 de 5a 77 \taesenc256kl 0x77(%edx),%xmm3",},
+{{0xf3, 0x0f, 0x38, 0xdd, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 dd 5a 77 \taesdec128kl 0x77(%edx),%xmm3",},
+{{0xf3, 0x0f, 0x38, 0xdf, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 df 5a 77 \taesdec256kl 0x77(%edx),%xmm3",},
+{{0xf3, 0x0f, 0x38, 0xd8, 0x42, 0x77, }, 6, 0, "", "",
+"f3 0f 38 d8 42 77 \taesencwide128kl 0x77(%edx)",},
+{{0xf3, 0x0f, 0x38, 0xd8, 0x52, 0x77, }, 6, 0, "", "",
+"f3 0f 38 d8 52 77 \taesencwide256kl 0x77(%edx)",},
+{{0xf3, 0x0f, 0x38, 0xd8, 0x4a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 d8 4a 77 \taesdecwide128kl 0x77(%edx)",},
+{{0xf3, 0x0f, 0x38, 0xd8, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 d8 5a 77 \taesdecwide256kl 0x77(%edx)",},
+{{0x0f, 0x38, 0xfc, 0x08, }, 4, 0, "", "",
+"0f 38 fc 08 \taadd %ecx,(%eax)",},
+{{0x0f, 0x38, 0xfc, 0x15, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 fc 15 78 56 34 12 \taadd %edx,0x12345678",},
+{{0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 fc 94 c8 78 56 34 12 \taadd %edx,0x12345678(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0x38, 0xfc, 0x08, }, 5, 0, "", "",
+"66 0f 38 fc 08 \taand %ecx,(%eax)",},
+{{0x66, 0x0f, 0x38, 0xfc, 0x15, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 38 fc 15 78 56 34 12 \taand %edx,0x12345678",},
+{{0x66, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 0f 38 fc 94 c8 78 56 34 12 \taand %edx,0x12345678(%eax,%ecx,8)",},
+{{0xf2, 0x0f, 0x38, 0xfc, 0x08, }, 5, 0, "", "",
+"f2 0f 38 fc 08 \taor %ecx,(%eax)",},
+{{0xf2, 0x0f, 0x38, 0xfc, 0x15, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 38 fc 15 78 56 34 12 \taor %edx,0x12345678",},
+{{0xf2, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f2 0f 38 fc 94 c8 78 56 34 12 \taor %edx,0x12345678(%eax,%ecx,8)",},
+{{0xf3, 0x0f, 0x38, 0xfc, 0x08, }, 5, 0, "", "",
+"f3 0f 38 fc 08 \taxor %ecx,(%eax)",},
+{{0xf3, 0x0f, 0x38, 0xfc, 0x15, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 38 fc 15 78 56 34 12 \taxor %edx,0x12345678",},
+{{0xf3, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f3 0f 38 fc 94 c8 78 56 34 12 \taxor %edx,0x12345678(%eax,%ecx,8)",},
+{{0xc4, 0xe2, 0x7a, 0xb1, 0x31, }, 5, 0, "", "",
+"c4 e2 7a b1 31 \tvbcstnebf162ps (%ecx),%xmm6",},
+{{0xc4, 0xe2, 0x79, 0xb1, 0x31, }, 5, 0, "", "",
+"c4 e2 79 b1 31 \tvbcstnesh2ps (%ecx),%xmm6",},
+{{0xc4, 0xe2, 0x7a, 0xb0, 0x31, }, 5, 0, "", "",
+"c4 e2 7a b0 31 \tvcvtneebf162ps (%ecx),%xmm6",},
+{{0xc4, 0xe2, 0x79, 0xb0, 0x31, }, 5, 0, "", "",
+"c4 e2 79 b0 31 \tvcvtneeph2ps (%ecx),%xmm6",},
+{{0xc4, 0xe2, 0x7b, 0xb0, 0x31, }, 5, 0, "", "",
+"c4 e2 7b b0 31 \tvcvtneobf162ps (%ecx),%xmm6",},
+{{0xc4, 0xe2, 0x78, 0xb0, 0x31, }, 5, 0, "", "",
+"c4 e2 78 b0 31 \tvcvtneoph2ps (%ecx),%xmm6",},
+{{0x62, 0xf2, 0x7e, 0x08, 0x72, 0xf1, }, 6, 0, "", "",
+"62 f2 7e 08 72 f1 \tvcvtneps2bf16 %xmm1,%xmm6",},
+{{0xc4, 0xe2, 0x6b, 0x50, 0xd9, }, 5, 0, "", "",
+"c4 e2 6b 50 d9 \tvpdpbssd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6b, 0x51, 0xd9, }, 5, 0, "", "",
+"c4 e2 6b 51 d9 \tvpdpbssds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0x50, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a 50 d9 \tvpdpbsud %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0x51, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a 51 d9 \tvpdpbsuds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x68, 0x50, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 50 d9 \tvpdpbuud %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x68, 0x51, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 51 d9 \tvpdpbuuds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0xd2, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a d2 d9 \tvpdpwsud %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0xd3, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a d3 d9 \tvpdpwsuds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xd2, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 d2 d9 \tvpdpwusd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xd3, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 d3 d9 \tvpdpwusds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x68, 0xd2, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 d2 d9 \tvpdpwuud %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x68, 0xd3, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 d3 d9 \tvpdpwuuds %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x08, 0xb5, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 b5 d9 \tvpmadd52huq %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x08, 0xb4, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 b4 d9 \tvpmadd52luq %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x7f, 0xcc, 0xd1, }, 5, 0, "", "",
+"c4 e2 7f cc d1 \tvsha512msg1 %xmm1,%ymm2",},
+{{0xc4, 0xe2, 0x7f, 0xcd, 0xd1, }, 5, 0, "", "",
+"c4 e2 7f cd d1 \tvsha512msg2 %ymm1,%ymm2",},
+{{0xc4, 0xe2, 0x6f, 0xcb, 0xd9, }, 5, 0, "", "",
+"c4 e2 6f cb d9 \tvsha512rnds2 %xmm1,%ymm2,%ymm3",},
+{{0xc4, 0xe2, 0x68, 0xda, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 da d9 \tvsm3msg1 %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xda, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 da d9 \tvsm3msg2 %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe3, 0x69, 0xde, 0xd9, 0xa1, }, 6, 0, "", "",
+"c4 e3 69 de d9 a1 \tvsm3rnds2 $0xa1,%xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0xda, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a da d9 \tvsm4key4 %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6b, 0xda, 0xd9, }, 5, 0, "", "",
+"c4 e2 6b da d9 \tvsm4rnds4 %xmm1,%xmm2,%xmm3",},
+{{0x0f, 0x0d, 0x00, }, 3, 0, "", "",
+"0f 0d 00 \tprefetch (%eax)",},
+{{0x0f, 0x18, 0x08, }, 3, 0, "", "",
+"0f 18 08 \tprefetcht0 (%eax)",},
+{{0x0f, 0x18, 0x10, }, 3, 0, "", "",
+"0f 18 10 \tprefetcht1 (%eax)",},
+{{0x0f, 0x18, 0x18, }, 3, 0, "", "",
+"0f 18 18 \tprefetcht2 (%eax)",},
+{{0x0f, 0x18, 0x00, }, 3, 0, "", "",
+"0f 18 00 \tprefetchnta (%eax)",},
+{{0x0f, 0x01, 0xc6, }, 3, 0, "", "",
+"0f 01 c6 \twrmsrns",},
{{0xf3, 0x0f, 0x3a, 0xf0, 0xc0, 0x00, }, 6, 0, "", "",
"f3 0f 3a f0 c0 00 \threset $0x0",},
{{0x0f, 0x01, 0xe8, }, 3, 0, "", "",
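
Each generated entry in these tables pairs raw instruction bytes with the objdump output that perf's x86 instruction decoder test must reproduce: the byte array, its length, the expected relative displacement, two optional strings matched against the decoder's operand/branch classification, and the reference disassembly line. A minimal sketch of that layout using the first new 32-bit entry above, assuming the record shape follows the fields visible in the data (the struct and field names are illustrative, not from this patch):

struct insn_test_rec {
	unsigned char bytes[16];	/* raw instruction bytes (x86 max is 15) */
	int len;			/* expected decoded length               */
	int rel;			/* expected relative displacement        */
	const char *op_hint;		/* optional operand check ("" here)      */
	const char *branch_hint;	/* optional branch-type check ("" here)  */
	const char *objdump_line;	/* reference disassembly to reproduce    */
};

static const struct insn_test_rec example = {
	{0xf3, 0x0f, 0x38, 0xdc, 0xd1}, 5, 0, "", "",
	"f3 0f 38 dc d1 \tloadiwkey %xmm1,%xmm2",
};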
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-64.c b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
index 3a47e98fec33..3881fe89df8b 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-64.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
@@ -3877,6 +3877,1032 @@
"62 f5 7c 08 2e 8c c8 78 56 34 12 \tvucomish 0x12345678(%rax,%rcx,8),%xmm1",},
{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x2e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
"67 62 f5 7c 08 2e 8c c8 78 56 34 12 \tvucomish 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0xf3, 0x0f, 0x38, 0xdc, 0xd1, }, 5, 0, "", "",
+"f3 0f 38 dc d1 \tloadiwkey %xmm1,%xmm2",},
+{{0xf3, 0x0f, 0x38, 0xfa, 0xd0, }, 5, 0, "", "",
+"f3 0f 38 fa d0 \tencodekey128 %eax,%edx",},
+{{0xf3, 0x0f, 0x38, 0xfb, 0xd0, }, 5, 0, "", "",
+"f3 0f 38 fb d0 \tencodekey256 %eax,%edx",},
+{{0xf3, 0x0f, 0x38, 0xdc, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 dc 5a 77 \taesenc128kl 0x77(%rdx),%xmm3",},
+{{0xf3, 0x0f, 0x38, 0xde, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 de 5a 77 \taesenc256kl 0x77(%rdx),%xmm3",},
+{{0xf3, 0x0f, 0x38, 0xdd, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 dd 5a 77 \taesdec128kl 0x77(%rdx),%xmm3",},
+{{0xf3, 0x0f, 0x38, 0xdf, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 df 5a 77 \taesdec256kl 0x77(%rdx),%xmm3",},
+{{0xf3, 0x0f, 0x38, 0xd8, 0x42, 0x77, }, 6, 0, "", "",
+"f3 0f 38 d8 42 77 \taesencwide128kl 0x77(%rdx)",},
+{{0xf3, 0x0f, 0x38, 0xd8, 0x52, 0x77, }, 6, 0, "", "",
+"f3 0f 38 d8 52 77 \taesencwide256kl 0x77(%rdx)",},
+{{0xf3, 0x0f, 0x38, 0xd8, 0x4a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 d8 4a 77 \taesdecwide128kl 0x77(%rdx)",},
+{{0xf3, 0x0f, 0x38, 0xd8, 0x5a, 0x77, }, 6, 0, "", "",
+"f3 0f 38 d8 5a 77 \taesdecwide256kl 0x77(%rdx)",},
+{{0x0f, 0x38, 0xfc, 0x08, }, 4, 0, "", "",
+"0f 38 fc 08 \taadd %ecx,(%rax)",},
+{{0x41, 0x0f, 0x38, 0xfc, 0x10, }, 5, 0, "", "",
+"41 0f 38 fc 10 \taadd %edx,(%r8)",},
+{{0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 fc 94 c8 78 56 34 12 \taadd %edx,0x12345678(%rax,%rcx,8)",},
+{{0x41, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"41 0f 38 fc 94 c8 78 56 34 12 \taadd %edx,0x12345678(%r8,%rcx,8)",},
+{{0x48, 0x0f, 0x38, 0xfc, 0x08, }, 5, 0, "", "",
+"48 0f 38 fc 08 \taadd %rcx,(%rax)",},
+{{0x49, 0x0f, 0x38, 0xfc, 0x10, }, 5, 0, "", "",
+"49 0f 38 fc 10 \taadd %rdx,(%r8)",},
+{{0x48, 0x0f, 0x38, 0xfc, 0x14, 0x25, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"48 0f 38 fc 14 25 78 56 34 12 \taadd %rdx,0x12345678",},
+{{0x48, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"48 0f 38 fc 94 c8 78 56 34 12 \taadd %rdx,0x12345678(%rax,%rcx,8)",},
+{{0x49, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"49 0f 38 fc 94 c8 78 56 34 12 \taadd %rdx,0x12345678(%r8,%rcx,8)",},
+{{0x66, 0x0f, 0x38, 0xfc, 0x08, }, 5, 0, "", "",
+"66 0f 38 fc 08 \taand %ecx,(%rax)",},
+{{0x66, 0x41, 0x0f, 0x38, 0xfc, 0x10, }, 6, 0, "", "",
+"66 41 0f 38 fc 10 \taand %edx,(%r8)",},
+{{0x66, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 0f 38 fc 94 c8 78 56 34 12 \taand %edx,0x12345678(%rax,%rcx,8)",},
+{{0x66, 0x41, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"66 41 0f 38 fc 94 c8 78 56 34 12 \taand %edx,0x12345678(%r8,%rcx,8)",},
+{{0x66, 0x48, 0x0f, 0x38, 0xfc, 0x08, }, 6, 0, "", "",
+"66 48 0f 38 fc 08 \taand %rcx,(%rax)",},
+{{0x66, 0x49, 0x0f, 0x38, 0xfc, 0x10, }, 6, 0, "", "",
+"66 49 0f 38 fc 10 \taand %rdx,(%r8)",},
+{{0x66, 0x48, 0x0f, 0x38, 0xfc, 0x14, 0x25, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"66 48 0f 38 fc 14 25 78 56 34 12 \taand %rdx,0x12345678",},
+{{0x66, 0x48, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"66 48 0f 38 fc 94 c8 78 56 34 12 \taand %rdx,0x12345678(%rax,%rcx,8)",},
+{{0x66, 0x49, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"66 49 0f 38 fc 94 c8 78 56 34 12 \taand %rdx,0x12345678(%r8,%rcx,8)",},
+{{0xf2, 0x0f, 0x38, 0xfc, 0x08, }, 5, 0, "", "",
+"f2 0f 38 fc 08 \taor %ecx,(%rax)",},
+{{0xf2, 0x41, 0x0f, 0x38, 0xfc, 0x10, }, 6, 0, "", "",
+"f2 41 0f 38 fc 10 \taor %edx,(%r8)",},
+{{0xf2, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f2 0f 38 fc 94 c8 78 56 34 12 \taor %edx,0x12345678(%rax,%rcx,8)",},
+{{0xf2, 0x41, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"f2 41 0f 38 fc 94 c8 78 56 34 12 \taor %edx,0x12345678(%r8,%rcx,8)",},
+{{0xf2, 0x48, 0x0f, 0x38, 0xfc, 0x08, }, 6, 0, "", "",
+"f2 48 0f 38 fc 08 \taor %rcx,(%rax)",},
+{{0xf2, 0x49, 0x0f, 0x38, 0xfc, 0x10, }, 6, 0, "", "",
+"f2 49 0f 38 fc 10 \taor %rdx,(%r8)",},
+{{0xf2, 0x48, 0x0f, 0x38, 0xfc, 0x14, 0x25, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"f2 48 0f 38 fc 14 25 78 56 34 12 \taor %rdx,0x12345678",},
+{{0xf2, 0x48, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"f2 48 0f 38 fc 94 c8 78 56 34 12 \taor %rdx,0x12345678(%rax,%rcx,8)",},
+{{0xf2, 0x49, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"f2 49 0f 38 fc 94 c8 78 56 34 12 \taor %rdx,0x12345678(%r8,%rcx,8)",},
+{{0xf3, 0x0f, 0x38, 0xfc, 0x08, }, 5, 0, "", "",
+"f3 0f 38 fc 08 \taxor %ecx,(%rax)",},
+{{0xf3, 0x41, 0x0f, 0x38, 0xfc, 0x10, }, 6, 0, "", "",
+"f3 41 0f 38 fc 10 \taxor %edx,(%r8)",},
+{{0xf3, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f3 0f 38 fc 94 c8 78 56 34 12 \taxor %edx,0x12345678(%rax,%rcx,8)",},
+{{0xf3, 0x41, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"f3 41 0f 38 fc 94 c8 78 56 34 12 \taxor %edx,0x12345678(%r8,%rcx,8)",},
+{{0xf3, 0x48, 0x0f, 0x38, 0xfc, 0x08, }, 6, 0, "", "",
+"f3 48 0f 38 fc 08 \taxor %rcx,(%rax)",},
+{{0xf3, 0x49, 0x0f, 0x38, 0xfc, 0x10, }, 6, 0, "", "",
+"f3 49 0f 38 fc 10 \taxor %rdx,(%r8)",},
+{{0xf3, 0x48, 0x0f, 0x38, 0xfc, 0x14, 0x25, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"f3 48 0f 38 fc 14 25 78 56 34 12 \taxor %rdx,0x12345678",},
+{{0xf3, 0x48, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"f3 48 0f 38 fc 94 c8 78 56 34 12 \taxor %rdx,0x12345678(%rax,%rcx,8)",},
+{{0xf3, 0x49, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"f3 49 0f 38 fc 94 c8 78 56 34 12 \taxor %rdx,0x12345678(%r8,%rcx,8)",},
+{{0xc4, 0xc2, 0x61, 0xe6, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e6 09 \tcmpbexadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe2, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e2 09 \tcmpbxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xee, 0x09, }, 5, 0, "", "",
+"c4 c2 61 ee 09 \tcmplexadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xec, 0x09, }, 5, 0, "", "",
+"c4 c2 61 ec 09 \tcmplxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe7, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e7 09 \tcmpnbexadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe3, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e3 09 \tcmpnbxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xef, 0x09, }, 5, 0, "", "",
+"c4 c2 61 ef 09 \tcmpnlexadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xed, 0x09, }, 5, 0, "", "",
+"c4 c2 61 ed 09 \tcmpnlxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe1, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e1 09 \tcmpnoxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xeb, 0x09, }, 5, 0, "", "",
+"c4 c2 61 eb 09 \tcmpnpxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe9, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e9 09 \tcmpnsxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe5, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e5 09 \tcmpnzxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe0, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e0 09 \tcmpoxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xea, 0x09, }, 5, 0, "", "",
+"c4 c2 61 ea 09 \tcmppxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe8, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e8 09 \tcmpsxadd %ebx,%ecx,(%r9)",},
+{{0xc4, 0xc2, 0x61, 0xe4, 0x09, }, 5, 0, "", "",
+"c4 c2 61 e4 09 \tcmpzxadd %ebx,%ecx,(%r9)",},
+{{0x0f, 0x0d, 0x00, }, 3, 0, "", "",
+"0f 0d 00 \tprefetch (%rax)",},
+{{0x0f, 0x18, 0x08, }, 3, 0, "", "",
+"0f 18 08 \tprefetcht0 (%rax)",},
+{{0x0f, 0x18, 0x10, }, 3, 0, "", "",
+"0f 18 10 \tprefetcht1 (%rax)",},
+{{0x0f, 0x18, 0x18, }, 3, 0, "", "",
+"0f 18 18 \tprefetcht2 (%rax)",},
+{{0x0f, 0x18, 0x00, }, 3, 0, "", "",
+"0f 18 00 \tprefetchnta (%rax)",},
+{{0x0f, 0x18, 0x3d, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 18 3d 78 56 34 12 \tprefetchit0 0x12345678(%rip) # 1234924e <main+0x1234924e>",},
+{{0x0f, 0x18, 0x35, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 18 35 78 56 34 12 \tprefetchit1 0x12345678(%rip) # 12349255 <main+0x12349255>",},
+{{0xf2, 0x0f, 0x01, 0xc6, }, 4, 0, "", "",
+"f2 0f 01 c6 \trdmsrlist",},
+{{0xf3, 0x0f, 0x01, 0xc6, }, 4, 0, "", "",
+"f3 0f 01 c6 \twrmsrlist",},
+{{0xf2, 0x0f, 0x38, 0xf8, 0xd0, }, 5, 0, "", "",
+"f2 0f 38 f8 d0 \turdmsr %rdx,%rax",},
+{{0x62, 0xfc, 0x7f, 0x08, 0xf8, 0xd6, }, 6, 0, "", "",
+"62 fc 7f 08 f8 d6 \turdmsr %rdx,%r22",},
+{{0xc4, 0xc7, 0x7b, 0xf8, 0xc4, 0x7f, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"c4 c7 7b f8 c4 7f 00 00 00 \turdmsr $0x7f,%r12",},
+{{0xf3, 0x0f, 0x38, 0xf8, 0xd0, }, 5, 0, "", "",
+"f3 0f 38 f8 d0 \tuwrmsr %rax,%rdx",},
+{{0x62, 0xfc, 0x7e, 0x08, 0xf8, 0xd6, }, 6, 0, "", "",
+"62 fc 7e 08 f8 d6 \tuwrmsr %r22,%rdx",},
+{{0xc4, 0xc7, 0x7a, 0xf8, 0xc4, 0x7f, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"c4 c7 7a f8 c4 7f 00 00 00 \tuwrmsr %r12,$0x7f",},
+{{0xc4, 0xe2, 0x7a, 0xb1, 0x31, }, 5, 0, "", "",
+"c4 e2 7a b1 31 \tvbcstnebf162ps (%rcx),%xmm6",},
+{{0xc4, 0xe2, 0x79, 0xb1, 0x31, }, 5, 0, "", "",
+"c4 e2 79 b1 31 \tvbcstnesh2ps (%rcx),%xmm6",},
+{{0xc4, 0xe2, 0x7a, 0xb0, 0x31, }, 5, 0, "", "",
+"c4 e2 7a b0 31 \tvcvtneebf162ps (%rcx),%xmm6",},
+{{0xc4, 0xe2, 0x79, 0xb0, 0x31, }, 5, 0, "", "",
+"c4 e2 79 b0 31 \tvcvtneeph2ps (%rcx),%xmm6",},
+{{0xc4, 0xe2, 0x7b, 0xb0, 0x31, }, 5, 0, "", "",
+"c4 e2 7b b0 31 \tvcvtneobf162ps (%rcx),%xmm6",},
+{{0xc4, 0xe2, 0x78, 0xb0, 0x31, }, 5, 0, "", "",
+"c4 e2 78 b0 31 \tvcvtneoph2ps (%rcx),%xmm6",},
+{{0x62, 0xf2, 0x7e, 0x08, 0x72, 0xf1, }, 6, 0, "", "",
+"62 f2 7e 08 72 f1 \tvcvtneps2bf16 %xmm1,%xmm6",},
+{{0xf2, 0x0f, 0x01, 0xca, }, 4, 0, "erets", "indirect",
+"f2 0f 01 ca \terets",},
+{{0xf3, 0x0f, 0x01, 0xca, }, 4, 0, "eretu", "indirect",
+"f3 0f 01 ca \teretu",},
+{{0xc4, 0xe2, 0x71, 0x6c, 0xda, }, 5, 0, "", "",
+"c4 e2 71 6c da \ttcmmimfp16ps %tmm1,%tmm2,%tmm3",},
+{{0xc4, 0xe2, 0x70, 0x6c, 0xda, }, 5, 0, "", "",
+"c4 e2 70 6c da \ttcmmrlfp16ps %tmm1,%tmm2,%tmm3",},
+{{0xc4, 0xe2, 0x73, 0x5c, 0xda, }, 5, 0, "", "",
+"c4 e2 73 5c da \ttdpfp16ps %tmm1,%tmm2,%tmm3",},
+{{0xd5, 0x10, 0xf6, 0xc2, 0x05, }, 5, 0, "", "",
+"d5 10 f6 c2 05 \ttest $0x5,%r18b",},
+{{0xd5, 0x10, 0xf7, 0xc2, 0x05, 0x00, 0x00, 0x00, }, 8, 0, "", "",
+"d5 10 f7 c2 05 00 00 00 \ttest $0x5,%r18d",},
+{{0xd5, 0x18, 0xf7, 0xc2, 0x05, 0x00, 0x00, 0x00, }, 8, 0, "", "",
+"d5 18 f7 c2 05 00 00 00 \ttest $0x5,%r18",},
+{{0x66, 0xd5, 0x10, 0xf7, 0xc2, 0x05, 0x00, }, 7, 0, "", "",
+"66 d5 10 f7 c2 05 00 \ttest $0x5,%r18w",},
+{{0x44, 0x0f, 0xaf, 0xf0, }, 4, 0, "", "",
+"44 0f af f0 \timul %eax,%r14d",},
+{{0xd5, 0xc0, 0xaf, 0xc8, }, 4, 0, "", "",
+"d5 c0 af c8 \timul %eax,%r17d",},
+{{0xd5, 0x90, 0x62, 0x12, }, 4, 0, "", "",
+"d5 90 62 12 \tpunpckldq %mm2,(%r18)",},
+{{0xd5, 0x40, 0x8d, 0x00, }, 4, 0, "", "",
+"d5 40 8d 00 \tlea (%rax),%r16d",},
+{{0xd5, 0x44, 0x8d, 0x38, }, 4, 0, "", "",
+"d5 44 8d 38 \tlea (%rax),%r31d",},
+{{0xd5, 0x20, 0x8d, 0x04, 0x05, 0x00, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"d5 20 8d 04 05 00 00 00 00 \tlea 0x0(,%r16,1),%eax",},
+{{0xd5, 0x22, 0x8d, 0x04, 0x3d, 0x00, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"d5 22 8d 04 3d 00 00 00 00 \tlea 0x0(,%r31,1),%eax",},
+{{0xd5, 0x10, 0x8d, 0x00, }, 4, 0, "", "",
+"d5 10 8d 00 \tlea (%r16),%eax",},
+{{0xd5, 0x11, 0x8d, 0x07, }, 4, 0, "", "",
+"d5 11 8d 07 \tlea (%r31),%eax",},
+{{0x4c, 0x8d, 0x38, }, 3, 0, "", "",
+"4c 8d 38 \tlea (%rax),%r15",},
+{{0xd5, 0x48, 0x8d, 0x00, }, 4, 0, "", "",
+"d5 48 8d 00 \tlea (%rax),%r16",},
+{{0x49, 0x8d, 0x07, }, 3, 0, "", "",
+"49 8d 07 \tlea (%r15),%rax",},
+{{0xd5, 0x18, 0x8d, 0x00, }, 4, 0, "", "",
+"d5 18 8d 00 \tlea (%r16),%rax",},
+{{0x4a, 0x8d, 0x04, 0x3d, 0x00, 0x00, 0x00, 0x00, }, 8, 0, "", "",
+"4a 8d 04 3d 00 00 00 00 \tlea 0x0(,%r15,1),%rax",},
+{{0xd5, 0x28, 0x8d, 0x04, 0x05, 0x00, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"d5 28 8d 04 05 00 00 00 00 \tlea 0x0(,%r16,1),%rax",},
+{{0xd5, 0x1c, 0x03, 0x00, }, 4, 0, "", "",
+"d5 1c 03 00 \tadd (%r16),%r8",},
+{{0xd5, 0x1c, 0x03, 0x38, }, 4, 0, "", "",
+"d5 1c 03 38 \tadd (%r16),%r15",},
+{{0xd5, 0x4a, 0x8b, 0x04, 0x0d, 0x00, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"d5 4a 8b 04 0d 00 00 00 00 \tmov 0x0(,%r9,1),%r16",},
+{{0xd5, 0x4a, 0x8b, 0x04, 0x35, 0x00, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"d5 4a 8b 04 35 00 00 00 00 \tmov 0x0(,%r14,1),%r16",},
+{{0xd5, 0x4d, 0x2b, 0x3a, }, 4, 0, "", "",
+"d5 4d 2b 3a \tsub (%r10),%r31",},
+{{0xd5, 0x4d, 0x2b, 0x7d, 0x00, }, 5, 0, "", "",
+"d5 4d 2b 7d 00 \tsub 0x0(%r13),%r31",},
+{{0xd5, 0x30, 0x8d, 0x44, 0x28, 0x01, }, 6, 0, "", "",
+"d5 30 8d 44 28 01 \tlea 0x1(%r16,%r21,1),%eax",},
+{{0xd5, 0x76, 0x8d, 0x7c, 0x10, 0x01, }, 6, 0, "", "",
+"d5 76 8d 7c 10 01 \tlea 0x1(%r16,%r26,1),%r31d",},
+{{0xd5, 0x12, 0x8d, 0x84, 0x0d, 0x81, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"d5 12 8d 84 0d 81 00 00 00 \tlea 0x81(%r21,%r9,1),%eax",},
+{{0xd5, 0x57, 0x8d, 0xbc, 0x0a, 0x81, 0x00, 0x00, 0x00, }, 9, 0, "", "",
+"d5 57 8d bc 0a 81 00 00 00 \tlea 0x81(%r26,%r9,1),%r31d",},
+{{0xd5, 0x00, 0xa1, 0xef, 0xcd, 0xab, 0x90, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "jmp", "indirect",
+"d5 00 a1 ef cd ab 90 78 56 34 12 \tjmpabs $0x1234567890abcdef",},
+{{0xd5, 0x08, 0x53, }, 3, 0, "", "",
+"d5 08 53 \tpushp %rbx",},
+{{0xd5, 0x18, 0x50, }, 3, 0, "", "",
+"d5 18 50 \tpushp %r16",},
+{{0xd5, 0x19, 0x57, }, 3, 0, "", "",
+"d5 19 57 \tpushp %r31",},
+{{0xd5, 0x19, 0x5f, }, 3, 0, "", "",
+"d5 19 5f \tpopp %r31",},
+{{0xd5, 0x18, 0x58, }, 3, 0, "", "",
+"d5 18 58 \tpopp %r16",},
+{{0xd5, 0x08, 0x5b, }, 3, 0, "", "",
+"d5 08 5b \tpopp %rbx",},
+{{0x62, 0x72, 0x34, 0x00, 0xf7, 0xd2, }, 6, 0, "", "",
+"62 72 34 00 f7 d2 \tbextr %r25d,%edx,%r10d",},
+{{0x62, 0xda, 0x34, 0x00, 0xf7, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 34 00 f7 94 87 23 01 00 00 \tbextr %r25d,0x123(%r31,%rax,4),%edx",},
+{{0x62, 0x52, 0x84, 0x00, 0xf7, 0xdf, }, 6, 0, "", "",
+"62 52 84 00 f7 df \tbextr %r31,%r15,%r11",},
+{{0x62, 0x5a, 0x84, 0x00, 0xf7, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 84 00 f7 bc 87 23 01 00 00 \tbextr %r31,0x123(%r31,%rax,4),%r15",},
+{{0x62, 0xda, 0x6c, 0x08, 0xf3, 0xd9, }, 6, 0, "", "",
+"62 da 6c 08 f3 d9 \tblsi %r25d,%edx",},
+{{0x62, 0xda, 0x84, 0x08, 0xf3, 0xdf, }, 6, 0, "", "",
+"62 da 84 08 f3 df \tblsi %r31,%r15",},
+{{0x62, 0xda, 0x34, 0x00, 0xf3, 0x9c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 34 00 f3 9c 87 23 01 00 00 \tblsi 0x123(%r31,%rax,4),%r25d",},
+{{0x62, 0xda, 0x84, 0x00, 0xf3, 0x9c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 84 00 f3 9c 87 23 01 00 00 \tblsi 0x123(%r31,%rax,4),%r31",},
+{{0x62, 0xda, 0x6c, 0x08, 0xf3, 0xd1, }, 6, 0, "", "",
+"62 da 6c 08 f3 d1 \tblsmsk %r25d,%edx",},
+{{0x62, 0xda, 0x84, 0x08, 0xf3, 0xd7, }, 6, 0, "", "",
+"62 da 84 08 f3 d7 \tblsmsk %r31,%r15",},
+{{0x62, 0xda, 0x34, 0x00, 0xf3, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 34 00 f3 94 87 23 01 00 00 \tblsmsk 0x123(%r31,%rax,4),%r25d",},
+{{0x62, 0xda, 0x84, 0x00, 0xf3, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 84 00 f3 94 87 23 01 00 00 \tblsmsk 0x123(%r31,%rax,4),%r31",},
+{{0x62, 0xda, 0x6c, 0x08, 0xf3, 0xc9, }, 6, 0, "", "",
+"62 da 6c 08 f3 c9 \tblsr %r25d,%edx",},
+{{0x62, 0xda, 0x84, 0x08, 0xf3, 0xcf, }, 6, 0, "", "",
+"62 da 84 08 f3 cf \tblsr %r31,%r15",},
+{{0x62, 0xda, 0x34, 0x00, 0xf3, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 34 00 f3 8c 87 23 01 00 00 \tblsr 0x123(%r31,%rax,4),%r25d",},
+{{0x62, 0xda, 0x84, 0x00, 0xf3, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 84 00 f3 8c 87 23 01 00 00 \tblsr 0x123(%r31,%rax,4),%r31",},
+{{0x62, 0x72, 0x34, 0x00, 0xf5, 0xd2, }, 6, 0, "", "",
+"62 72 34 00 f5 d2 \tbzhi %r25d,%edx,%r10d",},
+{{0x62, 0xda, 0x34, 0x00, 0xf5, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 34 00 f5 94 87 23 01 00 00 \tbzhi %r25d,0x123(%r31,%rax,4),%edx",},
+{{0x62, 0x52, 0x84, 0x00, 0xf5, 0xdf, }, 6, 0, "", "",
+"62 52 84 00 f5 df \tbzhi %r31,%r15,%r11",},
+{{0x62, 0x5a, 0x84, 0x00, 0xf5, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 84 00 f5 bc 87 23 01 00 00 \tbzhi %r31,0x123(%r31,%rax,4),%r15",},
+{{0x62, 0xda, 0x35, 0x00, 0xe6, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e6 94 87 23 01 00 00 \tcmpbexadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe6, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e6 bc 87 23 01 00 00 \tcmpbexadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe2, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e2 94 87 23 01 00 00 \tcmpbxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe2, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e2 bc 87 23 01 00 00 \tcmpbxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xec, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 ec 94 87 23 01 00 00 \tcmplxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xec, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 ec bc 87 23 01 00 00 \tcmplxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe7, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e7 94 87 23 01 00 00 \tcmpnbexadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe7, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e7 bc 87 23 01 00 00 \tcmpnbexadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe3, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e3 94 87 23 01 00 00 \tcmpnbxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe3, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e3 bc 87 23 01 00 00 \tcmpnbxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xef, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 ef 94 87 23 01 00 00 \tcmpnlexadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xef, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 ef bc 87 23 01 00 00 \tcmpnlexadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xed, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 ed 94 87 23 01 00 00 \tcmpnlxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xed, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 ed bc 87 23 01 00 00 \tcmpnlxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe1, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e1 94 87 23 01 00 00 \tcmpnoxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe1, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e1 bc 87 23 01 00 00 \tcmpnoxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xeb, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 eb 94 87 23 01 00 00 \tcmpnpxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xeb, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 eb bc 87 23 01 00 00 \tcmpnpxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe9, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e9 94 87 23 01 00 00 \tcmpnsxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe9, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e9 bc 87 23 01 00 00 \tcmpnsxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe5, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e5 94 87 23 01 00 00 \tcmpnzxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe5, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e5 bc 87 23 01 00 00 \tcmpnzxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe0, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e0 94 87 23 01 00 00 \tcmpoxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe0, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e0 bc 87 23 01 00 00 \tcmpoxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xea, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 ea 94 87 23 01 00 00 \tcmppxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xea, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 ea bc 87 23 01 00 00 \tcmppxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe8, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e8 94 87 23 01 00 00 \tcmpsxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe8, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e8 bc 87 23 01 00 00 \tcmpsxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x35, 0x00, 0xe4, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 e4 94 87 23 01 00 00 \tcmpzxadd %r25d,%edx,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x85, 0x00, 0xe4, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 e4 bc 87 23 01 00 00 \tcmpzxadd %r31,%r15,0x123(%r31,%rax,4)",},
+{{0x62, 0xcc, 0xfc, 0x08, 0xf1, 0xf7, }, 6, 0, "", "",
+"62 cc fc 08 f1 f7 \tcrc32 %r31,%r22",},
+{{0x62, 0xcc, 0xfc, 0x08, 0xf1, 0x37, }, 6, 0, "", "",
+"62 cc fc 08 f1 37 \tcrc32q (%r31),%r22",},
+{{0x62, 0xec, 0xfc, 0x08, 0xf0, 0xcb, }, 6, 0, "", "",
+"62 ec fc 08 f0 cb \tcrc32 %r19b,%r17",},
+{{0x62, 0xec, 0x7c, 0x08, 0xf0, 0xeb, }, 6, 0, "", "",
+"62 ec 7c 08 f0 eb \tcrc32 %r19b,%r21d",},
+{{0x62, 0xfc, 0x7c, 0x08, 0xf0, 0x1b, }, 6, 0, "", "",
+"62 fc 7c 08 f0 1b \tcrc32b (%r19),%ebx",},
+{{0x62, 0xcc, 0x7c, 0x08, 0xf1, 0xff, }, 6, 0, "", "",
+"62 cc 7c 08 f1 ff \tcrc32 %r31d,%r23d",},
+{{0x62, 0xcc, 0x7c, 0x08, 0xf1, 0x3f, }, 6, 0, "", "",
+"62 cc 7c 08 f1 3f \tcrc32l (%r31),%r23d",},
+{{0x62, 0xcc, 0x7d, 0x08, 0xf1, 0xef, }, 6, 0, "", "",
+"62 cc 7d 08 f1 ef \tcrc32 %r31w,%r21d",},
+{{0x62, 0xcc, 0x7d, 0x08, 0xf1, 0x2f, }, 6, 0, "", "",
+"62 cc 7d 08 f1 2f \tcrc32w (%r31),%r21d",},
+{{0x62, 0xe4, 0xfc, 0x08, 0xf1, 0xd0, }, 6, 0, "", "",
+"62 e4 fc 08 f1 d0 \tcrc32 %rax,%r18",},
+{{0x67, 0x62, 0x4c, 0x7f, 0x08, 0xf8, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 12, 0, "", "",
+"67 62 4c 7f 08 f8 8c 87 23 01 00 00 \tenqcmd 0x123(%r31d,%eax,4),%r25d",},
+{{0x62, 0x4c, 0x7f, 0x08, 0xf8, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7f 08 f8 bc 87 23 01 00 00 \tenqcmd 0x123(%r31,%rax,4),%r31",},
+{{0x67, 0x62, 0x4c, 0x7e, 0x08, 0xf8, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 12, 0, "", "",
+"67 62 4c 7e 08 f8 8c 87 23 01 00 00 \tenqcmds 0x123(%r31d,%eax,4),%r25d",},
+{{0x62, 0x4c, 0x7e, 0x08, 0xf8, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7e 08 f8 bc 87 23 01 00 00 \tenqcmds 0x123(%r31,%rax,4),%r31",},
+{{0x62, 0x4c, 0x7e, 0x08, 0xf0, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7e 08 f0 bc 87 23 01 00 00 \tinvept 0x123(%r31,%rax,4),%r31",},
+{{0x62, 0x4c, 0x7e, 0x08, 0xf2, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7e 08 f2 bc 87 23 01 00 00 \tinvpcid 0x123(%r31,%rax,4),%r31",},
+{{0x62, 0x4c, 0x7e, 0x08, 0xf1, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7e 08 f1 bc 87 23 01 00 00 \tinvvpid 0x123(%r31,%rax,4),%r31",},
+{{0x62, 0x61, 0x7d, 0x08, 0x93, 0xcd, }, 6, 0, "", "",
+"62 61 7d 08 93 cd \tkmovb %k5,%r25d",},
+{{0x62, 0xd9, 0x7d, 0x08, 0x91, 0xac, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d9 7d 08 91 ac 87 23 01 00 00 \tkmovb %k5,0x123(%r31,%rax,4)",},
+{{0x62, 0xd9, 0x7d, 0x08, 0x92, 0xe9, }, 6, 0, "", "",
+"62 d9 7d 08 92 e9 \tkmovb %r25d,%k5",},
+{{0x62, 0xd9, 0x7d, 0x08, 0x90, 0xac, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d9 7d 08 90 ac 87 23 01 00 00 \tkmovb 0x123(%r31,%rax,4),%k5",},
+{{0x62, 0x61, 0x7f, 0x08, 0x93, 0xcd, }, 6, 0, "", "",
+"62 61 7f 08 93 cd \tkmovd %k5,%r25d",},
+{{0x62, 0xd9, 0xfd, 0x08, 0x91, 0xac, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d9 fd 08 91 ac 87 23 01 00 00 \tkmovd %k5,0x123(%r31,%rax,4)",},
+{{0x62, 0xd9, 0x7f, 0x08, 0x92, 0xe9, }, 6, 0, "", "",
+"62 d9 7f 08 92 e9 \tkmovd %r25d,%k5",},
+{{0x62, 0xd9, 0xfd, 0x08, 0x90, 0xac, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d9 fd 08 90 ac 87 23 01 00 00 \tkmovd 0x123(%r31,%rax,4),%k5",},
+{{0x62, 0x61, 0xff, 0x08, 0x93, 0xfd, }, 6, 0, "", "",
+"62 61 ff 08 93 fd \tkmovq %k5,%r31",},
+{{0x62, 0xd9, 0xfc, 0x08, 0x91, 0xac, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d9 fc 08 91 ac 87 23 01 00 00 \tkmovq %k5,0x123(%r31,%rax,4)",},
+{{0x62, 0xd9, 0xff, 0x08, 0x92, 0xef, }, 6, 0, "", "",
+"62 d9 ff 08 92 ef \tkmovq %r31,%k5",},
+{{0x62, 0xd9, 0xfc, 0x08, 0x90, 0xac, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d9 fc 08 90 ac 87 23 01 00 00 \tkmovq 0x123(%r31,%rax,4),%k5",},
+{{0x62, 0x61, 0x7c, 0x08, 0x93, 0xcd, }, 6, 0, "", "",
+"62 61 7c 08 93 cd \tkmovw %k5,%r25d",},
+{{0x62, 0xd9, 0x7c, 0x08, 0x91, 0xac, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d9 7c 08 91 ac 87 23 01 00 00 \tkmovw %k5,0x123(%r31,%rax,4)",},
+{{0x62, 0xd9, 0x7c, 0x08, 0x92, 0xe9, }, 6, 0, "", "",
+"62 d9 7c 08 92 e9 \tkmovw %r25d,%k5",},
+{{0x62, 0xd9, 0x7c, 0x08, 0x90, 0xac, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d9 7c 08 90 ac 87 23 01 00 00 \tkmovw 0x123(%r31,%rax,4),%k5",},
+{{0x62, 0xda, 0x7c, 0x08, 0x49, 0x84, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 7c 08 49 84 87 23 01 00 00 \tldtilecfg 0x123(%r31,%rax,4)",},
+{{0x62, 0xfc, 0x7d, 0x08, 0x60, 0xc2, }, 6, 0, "", "",
+"62 fc 7d 08 60 c2 \tmovbe %r18w,%ax",},
+{{0x62, 0xd4, 0x7d, 0x08, 0x60, 0xc7, }, 6, 0, "", "",
+"62 d4 7d 08 60 c7 \tmovbe %r15w,%ax",},
+{{0x62, 0xec, 0x7d, 0x08, 0x61, 0x94, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 ec 7d 08 61 94 80 23 01 00 00 \tmovbe %r18w,0x123(%r16,%rax,4)",},
+{{0x62, 0xcc, 0x7d, 0x08, 0x61, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 cc 7d 08 61 94 87 23 01 00 00 \tmovbe %r18w,0x123(%r31,%rax,4)",},
+{{0x62, 0xdc, 0x7c, 0x08, 0x60, 0xd1, }, 6, 0, "", "",
+"62 dc 7c 08 60 d1 \tmovbe %r25d,%edx",},
+{{0x62, 0xd4, 0x7c, 0x08, 0x60, 0xd7, }, 6, 0, "", "",
+"62 d4 7c 08 60 d7 \tmovbe %r15d,%edx",},
+{{0x62, 0x6c, 0x7c, 0x08, 0x61, 0x8c, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 6c 7c 08 61 8c 80 23 01 00 00 \tmovbe %r25d,0x123(%r16,%rax,4)",},
+{{0x62, 0x5c, 0xfc, 0x08, 0x60, 0xff, }, 6, 0, "", "",
+"62 5c fc 08 60 ff \tmovbe %r31,%r15",},
+{{0x62, 0x54, 0xfc, 0x08, 0x60, 0xf8, }, 6, 0, "", "",
+"62 54 fc 08 60 f8 \tmovbe %r8,%r15",},
+{{0x62, 0x6c, 0xfc, 0x08, 0x61, 0xbc, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 6c fc 08 61 bc 80 23 01 00 00 \tmovbe %r31,0x123(%r16,%rax,4)",},
+{{0x62, 0x4c, 0xfc, 0x08, 0x61, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c fc 08 61 bc 87 23 01 00 00 \tmovbe %r31,0x123(%r31,%rax,4)",},
+{{0x62, 0x6c, 0xfc, 0x08, 0x60, 0xbc, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 6c fc 08 60 bc 80 23 01 00 00 \tmovbe 0x123(%r16,%rax,4),%r31",},
+{{0x62, 0xcc, 0x7d, 0x08, 0x60, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 cc 7d 08 60 94 87 23 01 00 00 \tmovbe 0x123(%r31,%rax,4),%r18w",},
+{{0x62, 0x4c, 0x7c, 0x08, 0x60, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7c 08 60 8c 87 23 01 00 00 \tmovbe 0x123(%r31,%rax,4),%r25d",},
+{{0x67, 0x62, 0x4c, 0x7d, 0x08, 0xf8, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 12, 0, "", "",
+"67 62 4c 7d 08 f8 8c 87 23 01 00 00 \tmovdir64b 0x123(%r31d,%eax,4),%r25d",},
+{{0x62, 0x4c, 0x7d, 0x08, 0xf8, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7d 08 f8 bc 87 23 01 00 00 \tmovdir64b 0x123(%r31,%rax,4),%r31",},
+{{0x62, 0x4c, 0x7c, 0x08, 0xf9, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7c 08 f9 8c 87 23 01 00 00 \tmovdiri %r25d,0x123(%r31,%rax,4)",},
+{{0x62, 0x4c, 0xfc, 0x08, 0xf9, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c fc 08 f9 bc 87 23 01 00 00 \tmovdiri %r31,0x123(%r31,%rax,4)",},
+{{0x62, 0x5a, 0x6f, 0x08, 0xf5, 0xd1, }, 6, 0, "", "",
+"62 5a 6f 08 f5 d1 \tpdep %r25d,%edx,%r10d",},
+{{0x62, 0x5a, 0x87, 0x08, 0xf5, 0xdf, }, 6, 0, "", "",
+"62 5a 87 08 f5 df \tpdep %r31,%r15,%r11",},
+{{0x62, 0xda, 0x37, 0x00, 0xf5, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 37 00 f5 94 87 23 01 00 00 \tpdep 0x123(%r31,%rax,4),%r25d,%edx",},
+{{0x62, 0x5a, 0x87, 0x00, 0xf5, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 87 00 f5 bc 87 23 01 00 00 \tpdep 0x123(%r31,%rax,4),%r31,%r15",},
+{{0x62, 0x5a, 0x6e, 0x08, 0xf5, 0xd1, }, 6, 0, "", "",
+"62 5a 6e 08 f5 d1 \tpext %r25d,%edx,%r10d",},
+{{0x62, 0x5a, 0x86, 0x08, 0xf5, 0xdf, }, 6, 0, "", "",
+"62 5a 86 08 f5 df \tpext %r31,%r15,%r11",},
+{{0x62, 0xda, 0x36, 0x00, 0xf5, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 36 00 f5 94 87 23 01 00 00 \tpext 0x123(%r31,%rax,4),%r25d,%edx",},
+{{0x62, 0x5a, 0x86, 0x00, 0xf5, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 86 00 f5 bc 87 23 01 00 00 \tpext 0x123(%r31,%rax,4),%r31,%r15",},
+{{0x62, 0x72, 0x35, 0x00, 0xf7, 0xd2, }, 6, 0, "", "",
+"62 72 35 00 f7 d2 \tshlx %r25d,%edx,%r10d",},
+{{0x62, 0xda, 0x35, 0x00, 0xf7, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 35 00 f7 94 87 23 01 00 00 \tshlx %r25d,0x123(%r31,%rax,4),%edx",},
+{{0x62, 0x52, 0x85, 0x00, 0xf7, 0xdf, }, 6, 0, "", "",
+"62 52 85 00 f7 df \tshlx %r31,%r15,%r11",},
+{{0x62, 0x5a, 0x85, 0x00, 0xf7, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 85 00 f7 bc 87 23 01 00 00 \tshlx %r31,0x123(%r31,%rax,4),%r15",},
+{{0x62, 0x72, 0x37, 0x00, 0xf7, 0xd2, }, 6, 0, "", "",
+"62 72 37 00 f7 d2 \tshrx %r25d,%edx,%r10d",},
+{{0x62, 0xda, 0x37, 0x00, 0xf7, 0x94, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 37 00 f7 94 87 23 01 00 00 \tshrx %r25d,0x123(%r31,%rax,4),%edx",},
+{{0x62, 0x52, 0x87, 0x00, 0xf7, 0xdf, }, 6, 0, "", "",
+"62 52 87 00 f7 df \tshrx %r31,%r15,%r11",},
+{{0x62, 0x5a, 0x87, 0x00, 0xf7, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 5a 87 00 f7 bc 87 23 01 00 00 \tshrx %r31,0x123(%r31,%rax,4),%r15",},
+{{0x62, 0xda, 0x7d, 0x08, 0x49, 0x84, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 7d 08 49 84 87 23 01 00 00 \tsttilecfg 0x123(%r31,%rax,4)",},
+{{0x62, 0xda, 0x7f, 0x08, 0x4b, 0xb4, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 7f 08 4b b4 87 23 01 00 00 \ttileloadd 0x123(%r31,%rax,4),%tmm6",},
+{{0x62, 0xda, 0x7d, 0x08, 0x4b, 0xb4, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 7d 08 4b b4 87 23 01 00 00 \ttileloaddt1 0x123(%r31,%rax,4),%tmm6",},
+{{0x62, 0xda, 0x7e, 0x08, 0x4b, 0xb4, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 da 7e 08 4b b4 87 23 01 00 00 \ttilestored %tmm6,0x123(%r31,%rax,4)",},
+{{0x62, 0xfa, 0x7d, 0x28, 0x1a, 0x18, }, 6, 0, "", "",
+"62 fa 7d 28 1a 18 \tvbroadcastf32x4 (%r16),%ymm3",},
+{{0x62, 0xfa, 0x7d, 0x28, 0x5a, 0x18, }, 6, 0, "", "",
+"62 fa 7d 28 5a 18 \tvbroadcasti32x4 (%r16),%ymm3",},
+{{0x62, 0xfb, 0x7d, 0x28, 0x19, 0x18, 0x01, }, 7, 0, "", "",
+"62 fb 7d 28 19 18 01 \tvextractf32x4 $0x1,%ymm3,(%r16)",},
+{{0x62, 0xfb, 0x7d, 0x28, 0x39, 0x18, 0x01, }, 7, 0, "", "",
+"62 fb 7d 28 39 18 01 \tvextracti32x4 $0x1,%ymm3,(%r16)",},
+{{0x62, 0x7b, 0x65, 0x28, 0x18, 0x00, 0x01, }, 7, 0, "", "",
+"62 7b 65 28 18 00 01 \tvinsertf32x4 $0x1,(%r16),%ymm3,%ymm8",},
+{{0x62, 0x7b, 0x65, 0x28, 0x38, 0x00, 0x01, }, 7, 0, "", "",
+"62 7b 65 28 38 00 01 \tvinserti32x4 $0x1,(%r16),%ymm3,%ymm8",},
+{{0x62, 0xdb, 0xfd, 0x08, 0x09, 0x30, 0x01, }, 7, 0, "", "",
+"62 db fd 08 09 30 01 \tvrndscalepd $0x1,(%r24),%xmm6",},
+{{0x62, 0xdb, 0x7d, 0x08, 0x08, 0x30, 0x02, }, 7, 0, "", "",
+"62 db 7d 08 08 30 02 \tvrndscaleps $0x2,(%r24),%xmm6",},
+{{0x62, 0xdb, 0xcd, 0x08, 0x0b, 0x18, 0x03, }, 7, 0, "", "",
+"62 db cd 08 0b 18 03 \tvrndscalesd $0x3,(%r24),%xmm6,%xmm3",},
+{{0x62, 0xdb, 0x4d, 0x08, 0x0a, 0x18, 0x04, }, 7, 0, "", "",
+"62 db 4d 08 0a 18 04 \tvrndscaless $0x4,(%r24),%xmm6,%xmm3",},
+{{0x62, 0x4c, 0x7c, 0x08, 0x66, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7c 08 66 8c 87 23 01 00 00 \twrssd %r25d,0x123(%r31,%rax,4)",},
+{{0x62, 0x4c, 0xfc, 0x08, 0x66, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c fc 08 66 bc 87 23 01 00 00 \twrssq %r31,0x123(%r31,%rax,4)",},
+{{0x62, 0x4c, 0x7d, 0x08, 0x65, 0x8c, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c 7d 08 65 8c 87 23 01 00 00 \twrussd %r25d,0x123(%r31,%rax,4)",},
+{{0x62, 0x4c, 0xfd, 0x08, 0x65, 0xbc, 0x87, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 4c fd 08 65 bc 87 23 01 00 00 \twrussq %r31,0x123(%r31,%rax,4)",},
+{{0x62, 0xf4, 0x0d, 0x10, 0x81, 0xd0, 0x34, 0x12, }, 8, 0, "", "",
+"62 f4 0d 10 81 d0 34 12 \tadc $0x1234,%ax,%r30w",},
+{{0x62, 0x7c, 0x6c, 0x10, 0x10, 0xf9, }, 6, 0, "", "",
+"62 7c 6c 10 10 f9 \tadc %r15b,%r17b,%r18b",},
+{{0x62, 0x54, 0x6c, 0x10, 0x11, 0x38, }, 6, 0, "", "",
+"62 54 6c 10 11 38 \tadc %r15d,(%r8),%r18d",},
+{{0x62, 0xc4, 0x3c, 0x18, 0x12, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3c 18 12 04 07 \tadc (%r15,%rax,1),%r16b,%r8b",},
+{{0x62, 0xc4, 0x3d, 0x18, 0x13, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3d 18 13 04 07 \tadc (%r15,%rax,1),%r16w,%r8w",},
+{{0x62, 0xfc, 0x5c, 0x10, 0x83, 0x14, 0x83, 0x11, }, 8, 0, "", "",
+"62 fc 5c 10 83 14 83 11 \tadc $0x11,(%r19,%rax,4),%r20d",},
+{{0x62, 0x54, 0x6d, 0x10, 0x66, 0xc7, }, 6, 0, "", "",
+"62 54 6d 10 66 c7 \tadcx %r15d,%r8d,%r18d",},
+{{0x62, 0x14, 0xf9, 0x08, 0x66, 0x04, 0x3f, }, 7, 0, "", "",
+"62 14 f9 08 66 04 3f \tadcx (%r15,%r31,1),%r8",},
+{{0x62, 0x14, 0x69, 0x10, 0x66, 0x04, 0x3f, }, 7, 0, "", "",
+"62 14 69 10 66 04 3f \tadcx (%r15,%r31,1),%r8d,%r18d",},
+{{0x62, 0xf4, 0x0d, 0x10, 0x81, 0xc0, 0x34, 0x12, }, 8, 0, "", "",
+"62 f4 0d 10 81 c0 34 12 \tadd $0x1234,%ax,%r30w",},
+{{0x62, 0xd4, 0xfc, 0x10, 0x81, 0xc7, 0x33, 0x44, 0x34, 0x12, }, 10, 0, "", "",
+"62 d4 fc 10 81 c7 33 44 34 12 \tadd $0x12344433,%r15,%r16",},
+{{0x62, 0xd4, 0x74, 0x10, 0x80, 0xc5, 0x34, }, 7, 0, "", "",
+"62 d4 74 10 80 c5 34 \tadd $0x34,%r13b,%r17b",},
+{{0x62, 0xf4, 0xbc, 0x18, 0x81, 0xc0, 0x11, 0x22, 0x33, 0xf4, }, 10, 0, "", "",
+"62 f4 bc 18 81 c0 11 22 33 f4 \tadd $0xfffffffff4332211,%rax,%r8",},
+{{0x62, 0x44, 0xfc, 0x10, 0x01, 0xf8, }, 6, 0, "", "",
+"62 44 fc 10 01 f8 \tadd %r31,%r8,%r16",},
+{{0x62, 0x44, 0xfc, 0x10, 0x01, 0x38, }, 6, 0, "", "",
+"62 44 fc 10 01 38 \tadd %r31,(%r8),%r16",},
+{{0x62, 0x44, 0xf8, 0x10, 0x01, 0x3c, 0xc0, }, 7, 0, "", "",
+"62 44 f8 10 01 3c c0 \tadd %r31,(%r8,%r16,8),%r16",},
+{{0x62, 0x44, 0x7c, 0x10, 0x00, 0xf8, }, 6, 0, "", "",
+"62 44 7c 10 00 f8 \tadd %r31b,%r8b,%r16b",},
+{{0x62, 0x44, 0x7c, 0x10, 0x01, 0xf8, }, 6, 0, "", "",
+"62 44 7c 10 01 f8 \tadd %r31d,%r8d,%r16d",},
+{{0x62, 0x44, 0x7d, 0x10, 0x01, 0xf8, }, 6, 0, "", "",
+"62 44 7d 10 01 f8 \tadd %r31w,%r8w,%r16w",},
+{{0x62, 0x5c, 0xfc, 0x10, 0x03, 0x07, }, 6, 0, "", "",
+"62 5c fc 10 03 07 \tadd (%r31),%r8,%r16",},
+{{0x62, 0x5c, 0xf8, 0x10, 0x03, 0x84, 0x07, 0x90, 0x90, 0x00, 0x00, }, 11, 0, "", "",
+"62 5c f8 10 03 84 07 90 90 00 00 \tadd 0x9090(%r31,%r16,1),%r8,%r16",},
+{{0x62, 0x44, 0x7c, 0x10, 0x00, 0xf8, }, 6, 0, "", "",
+"62 44 7c 10 00 f8 \tadd %r31b,%r8b,%r16b",},
+{{0x62, 0x44, 0x7c, 0x10, 0x01, 0xf8, }, 6, 0, "", "",
+"62 44 7c 10 01 f8 \tadd %r31d,%r8d,%r16d",},
+{{0x62, 0xfc, 0x5c, 0x10, 0x83, 0x04, 0x83, 0x11, }, 8, 0, "", "",
+"62 fc 5c 10 83 04 83 11 \tadd $0x11,(%r19,%rax,4),%r20d",},
+{{0x62, 0x44, 0xfc, 0x10, 0x01, 0xf8, }, 6, 0, "", "",
+"62 44 fc 10 01 f8 \tadd %r31,%r8,%r16",},
+{{0x62, 0xd4, 0xfc, 0x10, 0x81, 0x04, 0x8f, 0x33, 0x44, 0x34, 0x12, }, 11, 0, "", "",
+"62 d4 fc 10 81 04 8f 33 44 34 12 \tadd $0x12344433,(%r15,%rcx,4),%r16",},
+{{0x62, 0x44, 0x7d, 0x10, 0x01, 0xf8, }, 6, 0, "", "",
+"62 44 7d 10 01 f8 \tadd %r31w,%r8w,%r16w",},
+{{0x62, 0x54, 0x6e, 0x10, 0x66, 0xc7, }, 6, 0, "", "",
+"62 54 6e 10 66 c7 \tadox %r15d,%r8d,%r18d",},
+{{0x62, 0x5c, 0xfc, 0x10, 0x03, 0xc7, }, 6, 0, "", "",
+"62 5c fc 10 03 c7 \tadd %r31,%r8,%r16",},
+{{0x62, 0x44, 0xfc, 0x10, 0x01, 0xf8, }, 6, 0, "", "",
+"62 44 fc 10 01 f8 \tadd %r31,%r8,%r16",},
+{{0x62, 0x14, 0xfa, 0x08, 0x66, 0x04, 0x3f, }, 7, 0, "", "",
+"62 14 fa 08 66 04 3f \tadox (%r15,%r31,1),%r8",},
+{{0x62, 0x14, 0x6a, 0x10, 0x66, 0x04, 0x3f, }, 7, 0, "", "",
+"62 14 6a 10 66 04 3f \tadox (%r15,%r31,1),%r8d,%r18d",},
+{{0x62, 0xf4, 0x0d, 0x10, 0x81, 0xe0, 0x34, 0x12, }, 8, 0, "", "",
+"62 f4 0d 10 81 e0 34 12 \tand $0x1234,%ax,%r30w",},
+{{0x62, 0x7c, 0x6c, 0x10, 0x20, 0xf9, }, 6, 0, "", "",
+"62 7c 6c 10 20 f9 \tand %r15b,%r17b,%r18b",},
+{{0x62, 0x54, 0x6c, 0x10, 0x21, 0x38, }, 6, 0, "", "",
+"62 54 6c 10 21 38 \tand %r15d,(%r8),%r18d",},
+{{0x62, 0xc4, 0x3c, 0x18, 0x22, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3c 18 22 04 07 \tand (%r15,%rax,1),%r16b,%r8b",},
+{{0x62, 0xc4, 0x3d, 0x18, 0x23, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3d 18 23 04 07 \tand (%r15,%rax,1),%r16w,%r8w",},
+{{0x62, 0xfc, 0x5c, 0x10, 0x83, 0x24, 0x83, 0x11, }, 8, 0, "", "",
+"62 fc 5c 10 83 24 83 11 \tand $0x11,(%r19,%rax,4),%r20d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x47, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 47 90 90 90 90 90 \tcmova -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x43, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 43 90 90 90 90 90 \tcmovae -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x42, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 42 90 90 90 90 90 \tcmovb -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x46, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 46 90 90 90 90 90 \tcmovbe -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x44, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 44 90 90 90 90 90 \tcmove -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x4f, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 4f 90 90 90 90 90 \tcmovg -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x4d, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 4d 90 90 90 90 90 \tcmovge -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x4c, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 4c 90 90 90 90 90 \tcmovl -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x4e, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 4e 90 90 90 90 90 \tcmovle -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x45, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 45 90 90 90 90 90 \tcmovne -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x41, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 41 90 90 90 90 90 \tcmovno -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x4b, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 4b 90 90 90 90 90 \tcmovnp -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x49, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 49 90 90 90 90 90 \tcmovns -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x40, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 40 90 90 90 90 90 \tcmovo -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x4a, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 4a 90 90 90 90 90 \tcmovp -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0x48, 0x90, 0x90, 0x90, 0x90, 0x90, }, 11, 0, "", "",
+"67 62 f4 3c 18 48 90 90 90 90 90 \tcmovs -0x6f6f6f70(%eax),%edx,%r8d",},
+{{0x62, 0xf4, 0xf4, 0x10, 0xff, 0xc8, }, 6, 0, "", "",
+"62 f4 f4 10 ff c8 \tdec %rax,%r17",},
+{{0x62, 0x9c, 0x3c, 0x18, 0xfe, 0x0c, 0x27, }, 7, 0, "", "",
+"62 9c 3c 18 fe 0c 27 \tdec (%r31,%r12,1),%r8b",},
+{{0x62, 0xb4, 0xb0, 0x10, 0xaf, 0x94, 0xf8, 0x09, 0x09, 0x00, 0x00, }, 11, 0, "", "",
+"62 b4 b0 10 af 94 f8 09 09 00 00 \timul 0x909(%rax,%r31,8),%rdx,%r25",},
+{{0x67, 0x62, 0xf4, 0x3c, 0x18, 0xaf, 0x90, 0x09, 0x09, 0x09, 0x00, }, 11, 0, "", "",
+"67 62 f4 3c 18 af 90 09 09 09 00 \timul 0x90909(%eax),%edx,%r8d",},
+{{0x62, 0xdc, 0xfc, 0x10, 0xff, 0xc7, }, 6, 0, "", "",
+"62 dc fc 10 ff c7 \tinc %r31,%r16",},
+{{0x62, 0xdc, 0xbc, 0x18, 0xff, 0xc7, }, 6, 0, "", "",
+"62 dc bc 18 ff c7 \tinc %r31,%r8",},
+{{0x62, 0xf4, 0xe4, 0x18, 0xff, 0xc0, }, 6, 0, "", "",
+"62 f4 e4 18 ff c0 \tinc %rax,%rbx",},
+{{0x62, 0xf4, 0xf4, 0x10, 0xf7, 0xd8, }, 6, 0, "", "",
+"62 f4 f4 10 f7 d8 \tneg %rax,%r17",},
+{{0x62, 0x9c, 0x3c, 0x18, 0xf6, 0x1c, 0x27, }, 7, 0, "", "",
+"62 9c 3c 18 f6 1c 27 \tneg (%r31,%r12,1),%r8b",},
+{{0x62, 0xf4, 0xf4, 0x10, 0xf7, 0xd0, }, 6, 0, "", "",
+"62 f4 f4 10 f7 d0 \tnot %rax,%r17",},
+{{0x62, 0x9c, 0x3c, 0x18, 0xf6, 0x14, 0x27, }, 7, 0, "", "",
+"62 9c 3c 18 f6 14 27 \tnot (%r31,%r12,1),%r8b",},
+{{0x62, 0xf4, 0x0d, 0x10, 0x81, 0xc8, 0x34, 0x12, }, 8, 0, "", "",
+"62 f4 0d 10 81 c8 34 12 \tor $0x1234,%ax,%r30w",},
+{{0x62, 0x7c, 0x6c, 0x10, 0x08, 0xf9, }, 6, 0, "", "",
+"62 7c 6c 10 08 f9 \tor %r15b,%r17b,%r18b",},
+{{0x62, 0x54, 0x6c, 0x10, 0x09, 0x38, }, 6, 0, "", "",
+"62 54 6c 10 09 38 \tor %r15d,(%r8),%r18d",},
+{{0x62, 0xc4, 0x3c, 0x18, 0x0a, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3c 18 0a 04 07 \tor (%r15,%rax,1),%r16b,%r8b",},
+{{0x62, 0xc4, 0x3d, 0x18, 0x0b, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3d 18 0b 04 07 \tor (%r15,%rax,1),%r16w,%r8w",},
+{{0x62, 0xfc, 0x5c, 0x10, 0x83, 0x0c, 0x83, 0x11, }, 8, 0, "", "",
+"62 fc 5c 10 83 0c 83 11 \tor $0x11,(%r19,%rax,4),%r20d",},
+{{0x62, 0xd4, 0x04, 0x10, 0xc0, 0xd4, 0x02, }, 7, 0, "", "",
+"62 d4 04 10 c0 d4 02 \trcl $0x2,%r12b,%r31b",},
+{{0x62, 0xfc, 0x3c, 0x18, 0xd2, 0xd0, }, 6, 0, "", "",
+"62 fc 3c 18 d2 d0 \trcl %cl,%r16b,%r8b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xd0, 0x10, }, 6, 0, "", "",
+"62 f4 04 10 d0 10 \trcl $1,(%rax),%r31b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xc1, 0x10, 0x02, }, 7, 0, "", "",
+"62 f4 04 10 c1 10 02 \trcl $0x2,(%rax),%r31d",},
+{{0x62, 0xf4, 0x05, 0x10, 0xd1, 0x10, }, 6, 0, "", "",
+"62 f4 05 10 d1 10 \trcl $1,(%rax),%r31w",},
+{{0x62, 0xfc, 0x05, 0x10, 0xd3, 0x14, 0x83, }, 7, 0, "", "",
+"62 fc 05 10 d3 14 83 \trcl %cl,(%r19,%rax,4),%r31w",},
+{{0x62, 0xd4, 0x04, 0x10, 0xc0, 0xdc, 0x02, }, 7, 0, "", "",
+"62 d4 04 10 c0 dc 02 \trcr $0x2,%r12b,%r31b",},
+{{0x62, 0xfc, 0x3c, 0x18, 0xd2, 0xd8, }, 6, 0, "", "",
+"62 fc 3c 18 d2 d8 \trcr %cl,%r16b,%r8b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xd0, 0x18, }, 6, 0, "", "",
+"62 f4 04 10 d0 18 \trcr $1,(%rax),%r31b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xc1, 0x18, 0x02, }, 7, 0, "", "",
+"62 f4 04 10 c1 18 02 \trcr $0x2,(%rax),%r31d",},
+{{0x62, 0xf4, 0x05, 0x10, 0xd1, 0x18, }, 6, 0, "", "",
+"62 f4 05 10 d1 18 \trcr $1,(%rax),%r31w",},
+{{0x62, 0xfc, 0x05, 0x10, 0xd3, 0x1c, 0x83, }, 7, 0, "", "",
+"62 fc 05 10 d3 1c 83 \trcr %cl,(%r19,%rax,4),%r31w",},
+{{0x62, 0xd4, 0x04, 0x10, 0xc0, 0xc4, 0x02, }, 7, 0, "", "",
+"62 d4 04 10 c0 c4 02 \trol $0x2,%r12b,%r31b",},
+{{0x62, 0xfc, 0x3c, 0x18, 0xd2, 0xc0, }, 6, 0, "", "",
+"62 fc 3c 18 d2 c0 \trol %cl,%r16b,%r8b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xd0, 0x00, }, 6, 0, "", "",
+"62 f4 04 10 d0 00 \trol $1,(%rax),%r31b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xc1, 0x00, 0x02, }, 7, 0, "", "",
+"62 f4 04 10 c1 00 02 \trol $0x2,(%rax),%r31d",},
+{{0x62, 0xf4, 0x05, 0x10, 0xd1, 0x00, }, 6, 0, "", "",
+"62 f4 05 10 d1 00 \trol $1,(%rax),%r31w",},
+{{0x62, 0xfc, 0x05, 0x10, 0xd3, 0x04, 0x83, }, 7, 0, "", "",
+"62 fc 05 10 d3 04 83 \trol %cl,(%r19,%rax,4),%r31w",},
+{{0x62, 0xd4, 0x04, 0x10, 0xc0, 0xcc, 0x02, }, 7, 0, "", "",
+"62 d4 04 10 c0 cc 02 \tror $0x2,%r12b,%r31b",},
+{{0x62, 0xfc, 0x3c, 0x18, 0xd2, 0xc8, }, 6, 0, "", "",
+"62 fc 3c 18 d2 c8 \tror %cl,%r16b,%r8b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xd0, 0x08, }, 6, 0, "", "",
+"62 f4 04 10 d0 08 \tror $1,(%rax),%r31b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xc1, 0x08, 0x02, }, 7, 0, "", "",
+"62 f4 04 10 c1 08 02 \tror $0x2,(%rax),%r31d",},
+{{0x62, 0xf4, 0x05, 0x10, 0xd1, 0x08, }, 6, 0, "", "",
+"62 f4 05 10 d1 08 \tror $1,(%rax),%r31w",},
+{{0x62, 0xfc, 0x05, 0x10, 0xd3, 0x0c, 0x83, }, 7, 0, "", "",
+"62 fc 05 10 d3 0c 83 \tror %cl,(%r19,%rax,4),%r31w",},
+{{0x62, 0xd4, 0x04, 0x10, 0xc0, 0xfc, 0x02, }, 7, 0, "", "",
+"62 d4 04 10 c0 fc 02 \tsar $0x2,%r12b,%r31b",},
+{{0x62, 0xfc, 0x3c, 0x18, 0xd2, 0xf8, }, 6, 0, "", "",
+"62 fc 3c 18 d2 f8 \tsar %cl,%r16b,%r8b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xd0, 0x38, }, 6, 0, "", "",
+"62 f4 04 10 d0 38 \tsar $1,(%rax),%r31b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xc1, 0x38, 0x02, }, 7, 0, "", "",
+"62 f4 04 10 c1 38 02 \tsar $0x2,(%rax),%r31d",},
+{{0x62, 0xf4, 0x05, 0x10, 0xd1, 0x38, }, 6, 0, "", "",
+"62 f4 05 10 d1 38 \tsar $1,(%rax),%r31w",},
+{{0x62, 0xfc, 0x05, 0x10, 0xd3, 0x3c, 0x83, }, 7, 0, "", "",
+"62 fc 05 10 d3 3c 83 \tsar %cl,(%r19,%rax,4),%r31w",},
+{{0x62, 0xf4, 0x0d, 0x10, 0x81, 0xd8, 0x34, 0x12, }, 8, 0, "", "",
+"62 f4 0d 10 81 d8 34 12 \tsbb $0x1234,%ax,%r30w",},
+{{0x62, 0x7c, 0x6c, 0x10, 0x18, 0xf9, }, 6, 0, "", "",
+"62 7c 6c 10 18 f9 \tsbb %r15b,%r17b,%r18b",},
+{{0x62, 0x54, 0x6c, 0x10, 0x19, 0x38, }, 6, 0, "", "",
+"62 54 6c 10 19 38 \tsbb %r15d,(%r8),%r18d",},
+{{0x62, 0xc4, 0x3c, 0x18, 0x1a, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3c 18 1a 04 07 \tsbb (%r15,%rax,1),%r16b,%r8b",},
+{{0x62, 0xc4, 0x3d, 0x18, 0x1b, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3d 18 1b 04 07 \tsbb (%r15,%rax,1),%r16w,%r8w",},
+{{0x62, 0xfc, 0x5c, 0x10, 0x83, 0x1c, 0x83, 0x11, }, 8, 0, "", "",
+"62 fc 5c 10 83 1c 83 11 \tsbb $0x11,(%r19,%rax,4),%r20d",},
+{{0x62, 0xd4, 0x04, 0x10, 0xc0, 0xe4, 0x02, }, 7, 0, "", "",
+"62 d4 04 10 c0 e4 02 \tshl $0x2,%r12b,%r31b",},
+{{0x62, 0xd4, 0x04, 0x10, 0xc0, 0xe4, 0x02, }, 7, 0, "", "",
+"62 d4 04 10 c0 e4 02 \tshl $0x2,%r12b,%r31b",},
+{{0x62, 0xfc, 0x3c, 0x18, 0xd2, 0xe0, }, 6, 0, "", "",
+"62 fc 3c 18 d2 e0 \tshl %cl,%r16b,%r8b",},
+{{0x62, 0xfc, 0x3c, 0x18, 0xd2, 0xe0, }, 6, 0, "", "",
+"62 fc 3c 18 d2 e0 \tshl %cl,%r16b,%r8b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xd0, 0x20, }, 6, 0, "", "",
+"62 f4 04 10 d0 20 \tshl $1,(%rax),%r31b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xd0, 0x20, }, 6, 0, "", "",
+"62 f4 04 10 d0 20 \tshl $1,(%rax),%r31b",},
+{{0x62, 0x74, 0x84, 0x10, 0x24, 0x20, 0x01, }, 7, 0, "", "",
+"62 74 84 10 24 20 01 \tshld $0x1,%r12,(%rax),%r31",},
+{{0x62, 0x74, 0x04, 0x10, 0x24, 0x38, 0x02, }, 7, 0, "", "",
+"62 74 04 10 24 38 02 \tshld $0x2,%r15d,(%rax),%r31d",},
+{{0x62, 0x54, 0x05, 0x10, 0x24, 0xc4, 0x02, }, 7, 0, "", "",
+"62 54 05 10 24 c4 02 \tshld $0x2,%r8w,%r12w,%r31w",},
+{{0x62, 0x7c, 0xbc, 0x18, 0xa5, 0xe0, }, 6, 0, "", "",
+"62 7c bc 18 a5 e0 \tshld %cl,%r12,%r16,%r8",},
+{{0x62, 0x7c, 0x05, 0x10, 0xa5, 0x2c, 0x83, }, 7, 0, "", "",
+"62 7c 05 10 a5 2c 83 \tshld %cl,%r13w,(%r19,%rax,4),%r31w",},
+{{0x62, 0x74, 0x05, 0x10, 0xa5, 0x08, }, 6, 0, "", "",
+"62 74 05 10 a5 08 \tshld %cl,%r9w,(%rax),%r31w",},
+{{0x62, 0xf4, 0x04, 0x10, 0xc1, 0x20, 0x02, }, 7, 0, "", "",
+"62 f4 04 10 c1 20 02 \tshl $0x2,(%rax),%r31d",},
+{{0x62, 0xf4, 0x04, 0x10, 0xc1, 0x20, 0x02, }, 7, 0, "", "",
+"62 f4 04 10 c1 20 02 \tshl $0x2,(%rax),%r31d",},
+{{0x62, 0xf4, 0x05, 0x10, 0xd1, 0x20, }, 6, 0, "", "",
+"62 f4 05 10 d1 20 \tshl $1,(%rax),%r31w",},
+{{0x62, 0xf4, 0x05, 0x10, 0xd1, 0x20, }, 6, 0, "", "",
+"62 f4 05 10 d1 20 \tshl $1,(%rax),%r31w",},
+{{0x62, 0xfc, 0x05, 0x10, 0xd3, 0x24, 0x83, }, 7, 0, "", "",
+"62 fc 05 10 d3 24 83 \tshl %cl,(%r19,%rax,4),%r31w",},
+{{0x62, 0xfc, 0x05, 0x10, 0xd3, 0x24, 0x83, }, 7, 0, "", "",
+"62 fc 05 10 d3 24 83 \tshl %cl,(%r19,%rax,4),%r31w",},
+{{0x62, 0xd4, 0x04, 0x10, 0xc0, 0xec, 0x02, }, 7, 0, "", "",
+"62 d4 04 10 c0 ec 02 \tshr $0x2,%r12b,%r31b",},
+{{0x62, 0xfc, 0x3c, 0x18, 0xd2, 0xe8, }, 6, 0, "", "",
+"62 fc 3c 18 d2 e8 \tshr %cl,%r16b,%r8b",},
+{{0x62, 0xf4, 0x04, 0x10, 0xd0, 0x28, }, 6, 0, "", "",
+"62 f4 04 10 d0 28 \tshr $1,(%rax),%r31b",},
+{{0x62, 0x74, 0x84, 0x10, 0x2c, 0x20, 0x01, }, 7, 0, "", "",
+"62 74 84 10 2c 20 01 \tshrd $0x1,%r12,(%rax),%r31",},
+{{0x62, 0x74, 0x04, 0x10, 0x2c, 0x38, 0x02, }, 7, 0, "", "",
+"62 74 04 10 2c 38 02 \tshrd $0x2,%r15d,(%rax),%r31d",},
+{{0x62, 0x54, 0x05, 0x10, 0x2c, 0xc4, 0x02, }, 7, 0, "", "",
+"62 54 05 10 2c c4 02 \tshrd $0x2,%r8w,%r12w,%r31w",},
+{{0x62, 0x7c, 0xbc, 0x18, 0xad, 0xe0, }, 6, 0, "", "",
+"62 7c bc 18 ad e0 \tshrd %cl,%r12,%r16,%r8",},
+{{0x62, 0x7c, 0x05, 0x10, 0xad, 0x2c, 0x83, }, 7, 0, "", "",
+"62 7c 05 10 ad 2c 83 \tshrd %cl,%r13w,(%r19,%rax,4),%r31w",},
+{{0x62, 0x74, 0x05, 0x10, 0xad, 0x08, }, 6, 0, "", "",
+"62 74 05 10 ad 08 \tshrd %cl,%r9w,(%rax),%r31w",},
+{{0x62, 0xf4, 0x04, 0x10, 0xc1, 0x28, 0x02, }, 7, 0, "", "",
+"62 f4 04 10 c1 28 02 \tshr $0x2,(%rax),%r31d",},
+{{0x62, 0xf4, 0x05, 0x10, 0xd1, 0x28, }, 6, 0, "", "",
+"62 f4 05 10 d1 28 \tshr $1,(%rax),%r31w",},
+{{0x62, 0xfc, 0x05, 0x10, 0xd3, 0x2c, 0x83, }, 7, 0, "", "",
+"62 fc 05 10 d3 2c 83 \tshr %cl,(%r19,%rax,4),%r31w",},
+{{0x62, 0xf4, 0x0d, 0x10, 0x81, 0xe8, 0x34, 0x12, }, 8, 0, "", "",
+"62 f4 0d 10 81 e8 34 12 \tsub $0x1234,%ax,%r30w",},
+{{0x62, 0x7c, 0x6c, 0x10, 0x28, 0xf9, }, 6, 0, "", "",
+"62 7c 6c 10 28 f9 \tsub %r15b,%r17b,%r18b",},
+{{0x62, 0x54, 0x6c, 0x10, 0x29, 0x38, }, 6, 0, "", "",
+"62 54 6c 10 29 38 \tsub %r15d,(%r8),%r18d",},
+{{0x62, 0xc4, 0x3c, 0x18, 0x2a, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3c 18 2a 04 07 \tsub (%r15,%rax,1),%r16b,%r8b",},
+{{0x62, 0xc4, 0x3d, 0x18, 0x2b, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3d 18 2b 04 07 \tsub (%r15,%rax,1),%r16w,%r8w",},
+{{0x62, 0xfc, 0x5c, 0x10, 0x83, 0x2c, 0x83, 0x11, }, 8, 0, "", "",
+"62 fc 5c 10 83 2c 83 11 \tsub $0x11,(%r19,%rax,4),%r20d",},
+{{0x62, 0xf4, 0x0d, 0x10, 0x81, 0xf0, 0x34, 0x12, }, 8, 0, "", "",
+"62 f4 0d 10 81 f0 34 12 \txor $0x1234,%ax,%r30w",},
+{{0x62, 0x7c, 0x6c, 0x10, 0x30, 0xf9, }, 6, 0, "", "",
+"62 7c 6c 10 30 f9 \txor %r15b,%r17b,%r18b",},
+{{0x62, 0x54, 0x6c, 0x10, 0x31, 0x38, }, 6, 0, "", "",
+"62 54 6c 10 31 38 \txor %r15d,(%r8),%r18d",},
+{{0x62, 0xc4, 0x3c, 0x18, 0x32, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3c 18 32 04 07 \txor (%r15,%rax,1),%r16b,%r8b",},
+{{0x62, 0xc4, 0x3d, 0x18, 0x33, 0x04, 0x07, }, 7, 0, "", "",
+"62 c4 3d 18 33 04 07 \txor (%r15,%rax,1),%r16w,%r8w",},
+{{0x62, 0xfc, 0x5c, 0x10, 0x83, 0x34, 0x83, 0x11, }, 8, 0, "", "",
+"62 fc 5c 10 83 34 83 11 \txor $0x11,(%r19,%rax,4),%r20d",},
+{{0x62, 0xf4, 0x3c, 0x1c, 0x00, 0xda, }, 6, 0, "", "",
+"62 f4 3c 1c 00 da \t{nf} add %bl,%dl,%r8b",},
+{{0x62, 0xf4, 0x35, 0x1c, 0x01, 0xd0, }, 6, 0, "", "",
+"62 f4 35 1c 01 d0 \t{nf} add %dx,%ax,%r9w",},
+{{0x62, 0xd4, 0x6c, 0x1c, 0x02, 0x9c, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 6c 1c 02 9c 80 23 01 00 00 \t{nf} add 0x123(%r8,%rax,4),%bl,%dl",},
+{{0x62, 0xd4, 0x7d, 0x1c, 0x03, 0x94, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 7d 1c 03 94 80 23 01 00 00 \t{nf} add 0x123(%r8,%rax,4),%dx,%ax",},
+{{0x62, 0xf4, 0x3c, 0x1c, 0x08, 0xda, }, 6, 0, "", "",
+"62 f4 3c 1c 08 da \t{nf} or %bl,%dl,%r8b",},
+{{0x62, 0xf4, 0x35, 0x1c, 0x09, 0xd0, }, 6, 0, "", "",
+"62 f4 35 1c 09 d0 \t{nf} or %dx,%ax,%r9w",},
+{{0x62, 0xd4, 0x6c, 0x1c, 0x0a, 0x9c, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 6c 1c 0a 9c 80 23 01 00 00 \t{nf} or 0x123(%r8,%rax,4),%bl,%dl",},
+{{0x62, 0xd4, 0x7d, 0x1c, 0x0b, 0x94, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 7d 1c 0b 94 80 23 01 00 00 \t{nf} or 0x123(%r8,%rax,4),%dx,%ax",},
+{{0x62, 0xf4, 0x3c, 0x1c, 0x20, 0xda, }, 6, 0, "", "",
+"62 f4 3c 1c 20 da \t{nf} and %bl,%dl,%r8b",},
+{{0x62, 0xf4, 0x35, 0x1c, 0x21, 0xd0, }, 6, 0, "", "",
+"62 f4 35 1c 21 d0 \t{nf} and %dx,%ax,%r9w",},
+{{0x62, 0xd4, 0x6c, 0x1c, 0x22, 0x9c, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 6c 1c 22 9c 80 23 01 00 00 \t{nf} and 0x123(%r8,%rax,4),%bl,%dl",},
+{{0x62, 0xd4, 0x7d, 0x1c, 0x23, 0x94, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 7d 1c 23 94 80 23 01 00 00 \t{nf} and 0x123(%r8,%rax,4),%dx,%ax",},
+{{0x62, 0xf4, 0x35, 0x1c, 0x24, 0xd0, 0x7b, }, 7, 0, "", "",
+"62 f4 35 1c 24 d0 7b \t{nf} shld $0x7b,%dx,%ax,%r9w",},
+{{0x62, 0xf4, 0x3c, 0x1c, 0x28, 0xda, }, 6, 0, "", "",
+"62 f4 3c 1c 28 da \t{nf} sub %bl,%dl,%r8b",},
+{{0x62, 0xf4, 0x35, 0x1c, 0x29, 0xd0, }, 6, 0, "", "",
+"62 f4 35 1c 29 d0 \t{nf} sub %dx,%ax,%r9w",},
+{{0x62, 0xd4, 0x6c, 0x1c, 0x2a, 0x9c, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 6c 1c 2a 9c 80 23 01 00 00 \t{nf} sub 0x123(%r8,%rax,4),%bl,%dl",},
+{{0x62, 0xd4, 0x7d, 0x1c, 0x2b, 0x94, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 7d 1c 2b 94 80 23 01 00 00 \t{nf} sub 0x123(%r8,%rax,4),%dx,%ax",},
+{{0x62, 0xf4, 0x35, 0x1c, 0x2c, 0xd0, 0x7b, }, 7, 0, "", "",
+"62 f4 35 1c 2c d0 7b \t{nf} shrd $0x7b,%dx,%ax,%r9w",},
+{{0x62, 0xf4, 0x3c, 0x1c, 0x30, 0xda, }, 6, 0, "", "",
+"62 f4 3c 1c 30 da \t{nf} xor %bl,%dl,%r8b",},
+{{0x62, 0x4c, 0xfc, 0x0c, 0x31, 0xff, }, 6, 0, "", "",
+"62 4c fc 0c 31 ff \t{nf} xor %r31,%r31",},
+{{0x62, 0xd4, 0x6c, 0x1c, 0x32, 0x9c, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 6c 1c 32 9c 80 23 01 00 00 \t{nf} xor 0x123(%r8,%rax,4),%bl,%dl",},
+{{0x62, 0xd4, 0x7d, 0x1c, 0x33, 0x94, 0x80, 0x23, 0x01, 0x00, 0x00, }, 11, 0, "", "",
+"62 d4 7d 1c 33 94 80 23 01 00 00 \t{nf} xor 0x123(%r8,%rax,4),%dx,%ax",},
+{{0x62, 0x54, 0xfc, 0x0c, 0x69, 0xf9, 0x90, 0xff, 0x00, 0x00, }, 10, 0, "", "",
+"62 54 fc 0c 69 f9 90 ff 00 00 \t{nf} imul $0xff90,%r9,%r15",},
+{{0x62, 0x54, 0xfc, 0x0c, 0x6b, 0xf9, 0x7b, }, 7, 0, "", "",
+"62 54 fc 0c 6b f9 7b \t{nf} imul $0x7b,%r9,%r15",},
+{{0x62, 0xf4, 0x6c, 0x1c, 0x80, 0xf3, 0x7b, }, 7, 0, "", "",
+"62 f4 6c 1c 80 f3 7b \t{nf} xor $0x7b,%bl,%dl",},
+{{0x62, 0xf4, 0x7d, 0x1c, 0x83, 0xf2, 0x7b, }, 7, 0, "", "",
+"62 f4 7d 1c 83 f2 7b \t{nf} xor $0x7b,%dx,%ax",},
+{{0x62, 0x44, 0xfc, 0x0c, 0x88, 0xf9, }, 6, 0, "", "",
+"62 44 fc 0c 88 f9 \t{nf} popcnt %r9,%r31",},
+{{0x62, 0xf4, 0x35, 0x1c, 0xa5, 0xd0, }, 6, 0, "", "",
+"62 f4 35 1c a5 d0 \t{nf} shld %cl,%dx,%ax,%r9w",},
+{{0x62, 0xf4, 0x35, 0x1c, 0xad, 0xd0, }, 6, 0, "", "",
+"62 f4 35 1c ad d0 \t{nf} shrd %cl,%dx,%ax,%r9w",},
+{{0x62, 0x44, 0xa4, 0x1c, 0xaf, 0xf9, }, 6, 0, "", "",
+"62 44 a4 1c af f9 \t{nf} imul %r9,%r31,%r11",},
+{{0x62, 0xf4, 0x6c, 0x1c, 0xc0, 0xfb, 0x7b, }, 7, 0, "", "",
+"62 f4 6c 1c c0 fb 7b \t{nf} sar $0x7b,%bl,%dl",},
+{{0x62, 0xf4, 0x7d, 0x1c, 0xc1, 0xfa, 0x7b, }, 7, 0, "", "",
+"62 f4 7d 1c c1 fa 7b \t{nf} sar $0x7b,%dx,%ax",},
+{{0x62, 0xf4, 0x6c, 0x1c, 0xd0, 0xfb, }, 6, 0, "", "",
+"62 f4 6c 1c d0 fb \t{nf} sar $1,%bl,%dl",},
+{{0x62, 0xf4, 0x7d, 0x1c, 0xd1, 0xfa, }, 6, 0, "", "",
+"62 f4 7d 1c d1 fa \t{nf} sar $1,%dx,%ax",},
+{{0x62, 0xf4, 0x6c, 0x1c, 0xd2, 0xfb, }, 6, 0, "", "",
+"62 f4 6c 1c d2 fb \t{nf} sar %cl,%bl,%dl",},
+{{0x62, 0xf4, 0x7d, 0x1c, 0xd3, 0xfa, }, 6, 0, "", "",
+"62 f4 7d 1c d3 fa \t{nf} sar %cl,%dx,%ax",},
+{{0x62, 0x52, 0x84, 0x04, 0xf2, 0xd9, }, 6, 0, "", "",
+"62 52 84 04 f2 d9 \t{nf} andn %r9,%r31,%r11",},
+{{0x62, 0xd2, 0x84, 0x04, 0xf3, 0xd9, }, 6, 0, "", "",
+"62 d2 84 04 f3 d9 \t{nf} blsi %r9,%r31",},
+{{0x62, 0x44, 0xfc, 0x0c, 0xf4, 0xf9, }, 6, 0, "", "",
+"62 44 fc 0c f4 f9 \t{nf} tzcnt %r9,%r31",},
+{{0x62, 0x44, 0xfc, 0x0c, 0xf5, 0xf9, }, 6, 0, "", "",
+"62 44 fc 0c f5 f9 \t{nf} lzcnt %r9,%r31",},
+{{0x62, 0xf4, 0x7c, 0x0c, 0xf6, 0xfb, }, 6, 0, "", "",
+"62 f4 7c 0c f6 fb \t{nf} idiv %bl",},
+{{0x62, 0xf4, 0x7d, 0x0c, 0xf7, 0xfa, }, 6, 0, "", "",
+"62 f4 7d 0c f7 fa \t{nf} idiv %dx",},
+{{0x62, 0xf4, 0x6c, 0x1c, 0xfe, 0xcb, }, 6, 0, "", "",
+"62 f4 6c 1c fe cb \t{nf} dec %bl,%dl",},
+{{0x62, 0xf4, 0x7d, 0x1c, 0xff, 0xca, }, 6, 0, "", "",
+"62 f4 7d 1c ff ca \t{nf} dec %dx,%ax",},
+{{0xf3, 0x0f, 0x38, 0xdc, 0xd1, }, 5, 0, "", "",
+"f3 0f 38 dc d1 \tloadiwkey %xmm1,%xmm2",},
+{{0xf3, 0x0f, 0x38, 0xfa, 0xd0, }, 5, 0, "", "",
+"f3 0f 38 fa d0 \tencodekey128 %eax,%edx",},
+{{0xf3, 0x0f, 0x38, 0xfb, 0xd0, }, 5, 0, "", "",
+"f3 0f 38 fb d0 \tencodekey256 %eax,%edx",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xdc, 0x5a, 0x77, }, 7, 0, "", "",
+"67 f3 0f 38 dc 5a 77 \taesenc128kl 0x77(%edx),%xmm3",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xde, 0x5a, 0x77, }, 7, 0, "", "",
+"67 f3 0f 38 de 5a 77 \taesenc256kl 0x77(%edx),%xmm3",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xdd, 0x5a, 0x77, }, 7, 0, "", "",
+"67 f3 0f 38 dd 5a 77 \taesdec128kl 0x77(%edx),%xmm3",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xdf, 0x5a, 0x77, }, 7, 0, "", "",
+"67 f3 0f 38 df 5a 77 \taesdec256kl 0x77(%edx),%xmm3",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xd8, 0x42, 0x77, }, 7, 0, "", "",
+"67 f3 0f 38 d8 42 77 \taesencwide128kl 0x77(%edx)",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xd8, 0x52, 0x77, }, 7, 0, "", "",
+"67 f3 0f 38 d8 52 77 \taesencwide256kl 0x77(%edx)",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xd8, 0x4a, 0x77, }, 7, 0, "", "",
+"67 f3 0f 38 d8 4a 77 \taesdecwide128kl 0x77(%edx)",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xd8, 0x5a, 0x77, }, 7, 0, "", "",
+"67 f3 0f 38 d8 5a 77 \taesdecwide256kl 0x77(%edx)",},
+{{0x67, 0x0f, 0x38, 0xfc, 0x08, }, 5, 0, "", "",
+"67 0f 38 fc 08 \taadd %ecx,(%eax)",},
+{{0x0f, 0x38, 0xfc, 0x14, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 fc 14 25 78 56 34 12 \taadd %edx,0x12345678",},
+{{0x67, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"67 0f 38 fc 94 c8 78 56 34 12 \taadd %edx,0x12345678(%eax,%ecx,8)",},
+{{0x67, 0x66, 0x0f, 0x38, 0xfc, 0x08, }, 6, 0, "", "",
+"67 66 0f 38 fc 08 \taand %ecx,(%eax)",},
+{{0x66, 0x0f, 0x38, 0xfc, 0x14, 0x25, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 0f 38 fc 14 25 78 56 34 12 \taand %edx,0x12345678",},
+{{0x67, 0x66, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 66 0f 38 fc 94 c8 78 56 34 12 \taand %edx,0x12345678(%eax,%ecx,8)",},
+{{0x67, 0xf2, 0x0f, 0x38, 0xfc, 0x08, }, 6, 0, "", "",
+"67 f2 0f 38 fc 08 \taor %ecx,(%eax)",},
+{{0xf2, 0x0f, 0x38, 0xfc, 0x14, 0x25, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f2 0f 38 fc 14 25 78 56 34 12 \taor %edx,0x12345678",},
+{{0x67, 0xf2, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 f2 0f 38 fc 94 c8 78 56 34 12 \taor %edx,0x12345678(%eax,%ecx,8)",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xfc, 0x08, }, 6, 0, "", "",
+"67 f3 0f 38 fc 08 \taxor %ecx,(%eax)",},
+{{0xf3, 0x0f, 0x38, 0xfc, 0x14, 0x25, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f3 0f 38 fc 14 25 78 56 34 12 \taxor %edx,0x12345678",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xfc, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 f3 0f 38 fc 94 c8 78 56 34 12 \taxor %edx,0x12345678(%eax,%ecx,8)",},
+{{0x67, 0xc4, 0xe2, 0x7a, 0xb1, 0x31, }, 6, 0, "", "",
+"67 c4 e2 7a b1 31 \tvbcstnebf162ps (%ecx),%xmm6",},
+{{0x67, 0xc4, 0xe2, 0x79, 0xb1, 0x31, }, 6, 0, "", "",
+"67 c4 e2 79 b1 31 \tvbcstnesh2ps (%ecx),%xmm6",},
+{{0x67, 0xc4, 0xe2, 0x7a, 0xb0, 0x31, }, 6, 0, "", "",
+"67 c4 e2 7a b0 31 \tvcvtneebf162ps (%ecx),%xmm6",},
+{{0x67, 0xc4, 0xe2, 0x79, 0xb0, 0x31, }, 6, 0, "", "",
+"67 c4 e2 79 b0 31 \tvcvtneeph2ps (%ecx),%xmm6",},
+{{0x67, 0xc4, 0xe2, 0x7b, 0xb0, 0x31, }, 6, 0, "", "",
+"67 c4 e2 7b b0 31 \tvcvtneobf162ps (%ecx),%xmm6",},
+{{0x67, 0xc4, 0xe2, 0x78, 0xb0, 0x31, }, 6, 0, "", "",
+"67 c4 e2 78 b0 31 \tvcvtneoph2ps (%ecx),%xmm6",},
+{{0x62, 0xf2, 0x7e, 0x08, 0x72, 0xf1, }, 6, 0, "", "",
+"62 f2 7e 08 72 f1 \tvcvtneps2bf16 %xmm1,%xmm6",},
+{{0xc4, 0xe2, 0x6b, 0x50, 0xd9, }, 5, 0, "", "",
+"c4 e2 6b 50 d9 \tvpdpbssd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6b, 0x51, 0xd9, }, 5, 0, "", "",
+"c4 e2 6b 51 d9 \tvpdpbssds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0x50, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a 50 d9 \tvpdpbsud %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0x51, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a 51 d9 \tvpdpbsuds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x68, 0x50, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 50 d9 \tvpdpbuud %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x68, 0x51, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 51 d9 \tvpdpbuuds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0xd2, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a d2 d9 \tvpdpwsud %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0xd3, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a d3 d9 \tvpdpwsuds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xd2, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 d2 d9 \tvpdpwusd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xd3, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 d3 d9 \tvpdpwusds %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x68, 0xd2, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 d2 d9 \tvpdpwuud %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x68, 0xd3, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 d3 d9 \tvpdpwuuds %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x08, 0xb5, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 b5 d9 \tvpmadd52huq %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x08, 0xb4, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 b4 d9 \tvpmadd52luq %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x7f, 0xcc, 0xd1, }, 5, 0, "", "",
+"c4 e2 7f cc d1 \tvsha512msg1 %xmm1,%ymm2",},
+{{0xc4, 0xe2, 0x7f, 0xcd, 0xd1, }, 5, 0, "", "",
+"c4 e2 7f cd d1 \tvsha512msg2 %ymm1,%ymm2",},
+{{0xc4, 0xe2, 0x6f, 0xcb, 0xd9, }, 5, 0, "", "",
+"c4 e2 6f cb d9 \tvsha512rnds2 %xmm1,%ymm2,%ymm3",},
+{{0xc4, 0xe2, 0x68, 0xda, 0xd9, }, 5, 0, "", "",
+"c4 e2 68 da d9 \tvsm3msg1 %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xda, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 da d9 \tvsm3msg2 %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe3, 0x69, 0xde, 0xd9, 0xa1, }, 6, 0, "", "",
+"c4 e3 69 de d9 a1 \tvsm3rnds2 $0xa1,%xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6a, 0xda, 0xd9, }, 5, 0, "", "",
+"c4 e2 6a da d9 \tvsm4key4 %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6b, 0xda, 0xd9, }, 5, 0, "", "",
+"c4 e2 6b da d9 \tvsm4rnds4 %xmm1,%xmm2,%xmm3",},
+{{0x67, 0x0f, 0x0d, 0x00, }, 4, 0, "", "",
+"67 0f 0d 00 \tprefetch (%eax)",},
+{{0x67, 0x0f, 0x18, 0x08, }, 4, 0, "", "",
+"67 0f 18 08 \tprefetcht0 (%eax)",},
+{{0x67, 0x0f, 0x18, 0x10, }, 4, 0, "", "",
+"67 0f 18 10 \tprefetcht1 (%eax)",},
+{{0x67, 0x0f, 0x18, 0x18, }, 4, 0, "", "",
+"67 0f 18 18 \tprefetcht2 (%eax)",},
+{{0x67, 0x0f, 0x18, 0x00, }, 4, 0, "", "",
+"67 0f 18 00 \tprefetchnta (%eax)",},
+{{0x0f, 0x01, 0xc6, }, 3, 0, "", "",
+"0f 01 c6 \twrmsrns",},
{{0xf3, 0x0f, 0x3a, 0xf0, 0xc0, 0x00, }, 6, 0, "", "",
"f3 0f 3a f0 c0 00 \threset $0x0",},
{{0x0f, 0x01, 0xe8, }, 3, 0, "", "",
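Each generated entry above follows the same visible layout: the raw instruction bytes, the byte count, what appears to be a relative-displacement flag, two strings used elsewhere by the test, and the expected "hex dump\tdisassembly" text. The standalone sketch below is not the test's own harness; the struct and field names are invented for illustration. It mirrors that layout and sanity-checks the wrmsrns entry shown above.

/*
 * Minimal standalone sketch (invented struct/field names) checking that one
 * entry's expected text really starts with the hex dump of its bytes.
 */
#include <stdio.h>
#include <string.h>

struct dat_entry {
	unsigned char bytes[16];
	int len;
	int rel;
	const char *op;
	const char *branch;
	const char *text;
};

static const struct dat_entry entry = {
	{0x0f, 0x01, 0xc6, }, 3, 0, "", "",
	"0f 01 c6 \twrmsrns",
};

int main(void)
{
	char hex[64];
	int n = 0;

	/* Rebuild the "xx xx xx " prefix from the raw bytes. */
	for (int i = 0; i < entry.len; i++)
		n += snprintf(hex + n, sizeof(hex) - n, "%02x ", entry.bytes[i]);

	/* The expected text is that hex dump, a tab, then the disassembly. */
	if (strncmp(entry.text, hex, n) || entry.text[n] != '\t') {
		fprintf(stderr, "entry text does not match its bytes\n");
		return 1;
	}
	printf("ok: %s\n", entry.text + n + 1);
	return 0;
}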
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-src.c b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
index a391464c8dee..f55505c75d51 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-src.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
@@ -2628,6 +2628,512 @@ int main(void)
asm volatile("vucomish 0x12345678(%rax,%rcx,8), %xmm1");
asm volatile("vucomish 0x12345678(%eax,%ecx,8), %xmm1");
+ /* Key Locker */
+
+ asm volatile("loadiwkey %xmm1, %xmm2");
+ asm volatile("encodekey128 %eax, %edx");
+ asm volatile("encodekey256 %eax, %edx");
+ asm volatile("aesenc128kl 0x77(%rdx), %xmm3");
+ asm volatile("aesenc256kl 0x77(%rdx), %xmm3");
+ asm volatile("aesdec128kl 0x77(%rdx), %xmm3");
+ asm volatile("aesdec256kl 0x77(%rdx), %xmm3");
+ asm volatile("aesencwide128kl 0x77(%rdx)");
+ asm volatile("aesencwide256kl 0x77(%rdx)");
+ asm volatile("aesdecwide128kl 0x77(%rdx)");
+ asm volatile("aesdecwide256kl 0x77(%rdx)");
+
+ /* Remote Atomic Operations */
+
+ asm volatile("aadd %ecx,(%rax)");
+ asm volatile("aadd %edx,(%r8)");
+ asm volatile("aadd %edx,0x12345678(%rax,%rcx,8)");
+ asm volatile("aadd %edx,0x12345678(%r8,%rcx,8)");
+ asm volatile("aadd %rcx,(%rax)");
+ asm volatile("aadd %rdx,(%r8)");
+ asm volatile("aadd %rdx,(0x12345678)");
+ asm volatile("aadd %rdx,0x12345678(%rax,%rcx,8)");
+ asm volatile("aadd %rdx,0x12345678(%r8,%rcx,8)");
+
+ asm volatile("aand %ecx,(%rax)");
+ asm volatile("aand %edx,(%r8)");
+ asm volatile("aand %edx,0x12345678(%rax,%rcx,8)");
+ asm volatile("aand %edx,0x12345678(%r8,%rcx,8)");
+ asm volatile("aand %rcx,(%rax)");
+ asm volatile("aand %rdx,(%r8)");
+ asm volatile("aand %rdx,(0x12345678)");
+ asm volatile("aand %rdx,0x12345678(%rax,%rcx,8)");
+ asm volatile("aand %rdx,0x12345678(%r8,%rcx,8)");
+
+ asm volatile("aor %ecx,(%rax)");
+ asm volatile("aor %edx,(%r8)");
+ asm volatile("aor %edx,0x12345678(%rax,%rcx,8)");
+ asm volatile("aor %edx,0x12345678(%r8,%rcx,8)");
+ asm volatile("aor %rcx,(%rax)");
+ asm volatile("aor %rdx,(%r8)");
+ asm volatile("aor %rdx,(0x12345678)");
+ asm volatile("aor %rdx,0x12345678(%rax,%rcx,8)");
+ asm volatile("aor %rdx,0x12345678(%r8,%rcx,8)");
+
+ asm volatile("axor %ecx,(%rax)");
+ asm volatile("axor %edx,(%r8)");
+ asm volatile("axor %edx,0x12345678(%rax,%rcx,8)");
+ asm volatile("axor %edx,0x12345678(%r8,%rcx,8)");
+ asm volatile("axor %rcx,(%rax)");
+ asm volatile("axor %rdx,(%r8)");
+ asm volatile("axor %rdx,(0x12345678)");
+ asm volatile("axor %rdx,0x12345678(%rax,%rcx,8)");
+ asm volatile("axor %rdx,0x12345678(%r8,%rcx,8)");
+
+ /* VEX CMPxxXADD */
+
+ asm volatile("cmpbexadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpbxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmplexadd %ebx,%ecx,(%r9)");
+ asm volatile("cmplxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpnbexadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpnbxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpnlexadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpnlxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpnoxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpnpxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpnsxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpnzxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpoxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmppxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpsxadd %ebx,%ecx,(%r9)");
+ asm volatile("cmpzxadd %ebx,%ecx,(%r9)");
+
+ /* Pre-fetch */
+
+ asm volatile("prefetch (%rax)");
+ asm volatile("prefetcht0 (%rax)");
+ asm volatile("prefetcht1 (%rax)");
+ asm volatile("prefetcht2 (%rax)");
+ asm volatile("prefetchnta (%rax)");
+ asm volatile("prefetchit0 0x12345678(%rip)");
+ asm volatile("prefetchit1 0x12345678(%rip)");
+
+ /* MSR List */
+
+ asm volatile("rdmsrlist");
+ asm volatile("wrmsrlist");
+
+ /* User Read/Write MSR */
+
+ asm volatile("urdmsr %rdx,%rax");
+ asm volatile("urdmsr %rdx,%r22");
+ asm volatile("urdmsr $0x7f,%r12");
+ asm volatile("uwrmsr %rax,%rdx");
+ asm volatile("uwrmsr %r22,%rdx");
+ asm volatile("uwrmsr %r12,$0x7f");
+
+ /* AVX NE Convert */
+
+ asm volatile("vbcstnebf162ps (%rcx),%xmm6");
+ asm volatile("vbcstnesh2ps (%rcx),%xmm6");
+ asm volatile("vcvtneebf162ps (%rcx),%xmm6");
+ asm volatile("vcvtneeph2ps (%rcx),%xmm6");
+ asm volatile("vcvtneobf162ps (%rcx),%xmm6");
+ asm volatile("vcvtneoph2ps (%rcx),%xmm6");
+ asm volatile("vcvtneps2bf16 %xmm1,%xmm6");
+
+ /* FRED */
+
+ asm volatile("erets"); /* Expecting: erets indirect 0 */
+ asm volatile("eretu"); /* Expecting: eretu indirect 0 */
+
+ /* AMX Complex */
+
+ asm volatile("tcmmimfp16ps %tmm1,%tmm2,%tmm3");
+ asm volatile("tcmmrlfp16ps %tmm1,%tmm2,%tmm3");
+
+ /* AMX FP16 */
+
+ asm volatile("tdpfp16ps %tmm1,%tmm2,%tmm3");
+
+ /* REX2 */
+
+ asm volatile("test $0x5, %r18b");
+ asm volatile("test $0x5, %r18d");
+ asm volatile("test $0x5, %r18");
+ asm volatile("test $0x5, %r18w");
+ asm volatile("imull %eax, %r14d");
+ asm volatile("imull %eax, %r17d");
+ asm volatile("punpckldq (%r18), %mm2");
+ asm volatile("leal (%rax), %r16d");
+ asm volatile("leal (%rax), %r31d");
+ asm volatile("leal (,%r16), %eax");
+ asm volatile("leal (,%r31), %eax");
+ asm volatile("leal (%r16), %eax");
+ asm volatile("leal (%r31), %eax");
+ asm volatile("leaq (%rax), %r15");
+ asm volatile("leaq (%rax), %r16");
+ asm volatile("leaq (%r15), %rax");
+ asm volatile("leaq (%r16), %rax");
+ asm volatile("leaq (,%r15), %rax");
+ asm volatile("leaq (,%r16), %rax");
+ asm volatile("add (%r16), %r8");
+ asm volatile("add (%r16), %r15");
+ asm volatile("mov (,%r9), %r16");
+ asm volatile("mov (,%r14), %r16");
+ asm volatile("sub (%r10), %r31");
+ asm volatile("sub (%r13), %r31");
+ asm volatile("leal 1(%r16, %r21), %eax");
+ asm volatile("leal 1(%r16, %r26), %r31d");
+ asm volatile("leal 129(%r21, %r9), %eax");
+ asm volatile("leal 129(%r26, %r9), %r31d");
+ /*
+ * Have to use .byte for jmpabs because gas does not support the
+ * mnemonic for some reason, but then it also gets the source line wrong
+ * with .byte, so the following is a workaround.
+ */
+ asm volatile(""); /* Expecting: jmp indirect 0 */
+ asm volatile(".byte 0xd5, 0x00, 0xa1, 0xef, 0xcd, 0xab, 0x90, 0x78, 0x56, 0x34, 0x12");
+ asm volatile("pushp %rbx");
+ asm volatile("pushp %r16");
+ asm volatile("pushp %r31");
+ asm volatile("popp %r31");
+ asm volatile("popp %r16");
+ asm volatile("popp %rbx");
+
+ /* APX */
+
+ asm volatile("bextr %r25d,%edx,%r10d");
+ asm volatile("bextr %r25d,0x123(%r31,%rax,4),%edx");
+ asm volatile("bextr %r31,%r15,%r11");
+ asm volatile("bextr %r31,0x123(%r31,%rax,4),%r15");
+ asm volatile("blsi %r25d,%edx");
+ asm volatile("blsi %r31,%r15");
+ asm volatile("blsi 0x123(%r31,%rax,4),%r25d");
+ asm volatile("blsi 0x123(%r31,%rax,4),%r31");
+ asm volatile("blsmsk %r25d,%edx");
+ asm volatile("blsmsk %r31,%r15");
+ asm volatile("blsmsk 0x123(%r31,%rax,4),%r25d");
+ asm volatile("blsmsk 0x123(%r31,%rax,4),%r31");
+ asm volatile("blsr %r25d,%edx");
+ asm volatile("blsr %r31,%r15");
+ asm volatile("blsr 0x123(%r31,%rax,4),%r25d");
+ asm volatile("blsr 0x123(%r31,%rax,4),%r31");
+ asm volatile("bzhi %r25d,%edx,%r10d");
+ asm volatile("bzhi %r25d,0x123(%r31,%rax,4),%edx");
+ asm volatile("bzhi %r31,%r15,%r11");
+ asm volatile("bzhi %r31,0x123(%r31,%rax,4),%r15");
+ asm volatile("cmpbexadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpbexadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpbxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpbxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmplxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmplxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpnbexadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpnbexadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpnbxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpnbxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpnlexadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpnlexadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpnlxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpnlxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpnoxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpnoxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpnpxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpnpxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpnsxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpnsxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpnzxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpnzxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpoxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpoxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmppxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmppxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpsxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpsxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("cmpzxadd %r25d,%edx,0x123(%r31,%rax,4)");
+ asm volatile("cmpzxadd %r31,%r15,0x123(%r31,%rax,4)");
+ asm volatile("crc32q %r31, %r22");
+ asm volatile("crc32q (%r31), %r22");
+ asm volatile("crc32b %r19b, %r17");
+ asm volatile("crc32b %r19b, %r21d");
+ asm volatile("crc32b (%r19),%ebx");
+ asm volatile("crc32l %r31d, %r23d");
+ asm volatile("crc32l (%r31), %r23d");
+ asm volatile("crc32w %r31w, %r21d");
+ asm volatile("crc32w (%r31),%r21d");
+ asm volatile("crc32 %rax, %r18");
+ asm volatile("enqcmd 0x123(%r31d,%eax,4),%r25d");
+ asm volatile("enqcmd 0x123(%r31,%rax,4),%r31");
+ asm volatile("enqcmds 0x123(%r31d,%eax,4),%r25d");
+ asm volatile("enqcmds 0x123(%r31,%rax,4),%r31");
+ asm volatile("invept 0x123(%r31,%rax,4),%r31");
+ asm volatile("invpcid 0x123(%r31,%rax,4),%r31");
+ asm volatile("invvpid 0x123(%r31,%rax,4),%r31");
+ asm volatile("kmovb %k5,%r25d");
+ asm volatile("kmovb %k5,0x123(%r31,%rax,4)");
+ asm volatile("kmovb %r25d,%k5");
+ asm volatile("kmovb 0x123(%r31,%rax,4),%k5");
+ asm volatile("kmovd %k5,%r25d");
+ asm volatile("kmovd %k5,0x123(%r31,%rax,4)");
+ asm volatile("kmovd %r25d,%k5");
+ asm volatile("kmovd 0x123(%r31,%rax,4),%k5");
+ asm volatile("kmovq %k5,%r31");
+ asm volatile("kmovq %k5,0x123(%r31,%rax,4)");
+ asm volatile("kmovq %r31,%k5");
+ asm volatile("kmovq 0x123(%r31,%rax,4),%k5");
+ asm volatile("kmovw %k5,%r25d");
+ asm volatile("kmovw %k5,0x123(%r31,%rax,4)");
+ asm volatile("kmovw %r25d,%k5");
+ asm volatile("kmovw 0x123(%r31,%rax,4),%k5");
+ asm volatile("ldtilecfg 0x123(%r31,%rax,4)");
+ asm volatile("movbe %r18w,%ax");
+ asm volatile("movbe %r15w,%ax");
+ asm volatile("movbe %r18w,0x123(%r16,%rax,4)");
+ asm volatile("movbe %r18w,0x123(%r31,%rax,4)");
+ asm volatile("movbe %r25d,%edx");
+ asm volatile("movbe %r15d,%edx");
+ asm volatile("movbe %r25d,0x123(%r16,%rax,4)");
+ asm volatile("movbe %r31,%r15");
+ asm volatile("movbe %r8,%r15");
+ asm volatile("movbe %r31,0x123(%r16,%rax,4)");
+ asm volatile("movbe %r31,0x123(%r31,%rax,4)");
+ asm volatile("movbe 0x123(%r16,%rax,4),%r31");
+ asm volatile("movbe 0x123(%r31,%rax,4),%r18w");
+ asm volatile("movbe 0x123(%r31,%rax,4),%r25d");
+ asm volatile("movdir64b 0x123(%r31d,%eax,4),%r25d");
+ asm volatile("movdir64b 0x123(%r31,%rax,4),%r31");
+ asm volatile("movdiri %r25d,0x123(%r31,%rax,4)");
+ asm volatile("movdiri %r31,0x123(%r31,%rax,4)");
+ asm volatile("pdep %r25d,%edx,%r10d");
+ asm volatile("pdep %r31,%r15,%r11");
+ asm volatile("pdep 0x123(%r31,%rax,4),%r25d,%edx");
+ asm volatile("pdep 0x123(%r31,%rax,4),%r31,%r15");
+ asm volatile("pext %r25d,%edx,%r10d");
+ asm volatile("pext %r31,%r15,%r11");
+ asm volatile("pext 0x123(%r31,%rax,4),%r25d,%edx");
+ asm volatile("pext 0x123(%r31,%rax,4),%r31,%r15");
+ asm volatile("shlx %r25d,%edx,%r10d");
+ asm volatile("shlx %r25d,0x123(%r31,%rax,4),%edx");
+ asm volatile("shlx %r31,%r15,%r11");
+ asm volatile("shlx %r31,0x123(%r31,%rax,4),%r15");
+ asm volatile("shrx %r25d,%edx,%r10d");
+ asm volatile("shrx %r25d,0x123(%r31,%rax,4),%edx");
+ asm volatile("shrx %r31,%r15,%r11");
+ asm volatile("shrx %r31,0x123(%r31,%rax,4),%r15");
+ asm volatile("sttilecfg 0x123(%r31,%rax,4)");
+ asm volatile("tileloadd 0x123(%r31,%rax,4),%tmm6");
+ asm volatile("tileloaddt1 0x123(%r31,%rax,4),%tmm6");
+ asm volatile("tilestored %tmm6,0x123(%r31,%rax,4)");
+ asm volatile("vbroadcastf128 (%r16),%ymm3");
+ asm volatile("vbroadcasti128 (%r16),%ymm3");
+ asm volatile("vextractf128 $1,%ymm3,(%r16)");
+ asm volatile("vextracti128 $1,%ymm3,(%r16)");
+ asm volatile("vinsertf128 $1,(%r16),%ymm3,%ymm8");
+ asm volatile("vinserti128 $1,(%r16),%ymm3,%ymm8");
+ asm volatile("vroundpd $1,(%r24),%xmm6");
+ asm volatile("vroundps $2,(%r24),%xmm6");
+ asm volatile("vroundsd $3,(%r24),%xmm6,%xmm3");
+ asm volatile("vroundss $4,(%r24),%xmm6,%xmm3");
+ asm volatile("wrssd %r25d,0x123(%r31,%rax,4)");
+ asm volatile("wrssq %r31,0x123(%r31,%rax,4)");
+ asm volatile("wrussd %r25d,0x123(%r31,%rax,4)");
+ asm volatile("wrussq %r31,0x123(%r31,%rax,4)");
+
+ /* APX new data destination */
+
+ asm volatile("adc $0x1234,%ax,%r30w");
+ asm volatile("adc %r15b,%r17b,%r18b");
+ asm volatile("adc %r15d,(%r8),%r18d");
+ asm volatile("adc (%r15,%rax,1),%r16b,%r8b");
+ asm volatile("adc (%r15,%rax,1),%r16w,%r8w");
+ asm volatile("adcl $0x11,(%r19,%rax,4),%r20d");
+ asm volatile("adcx %r15d,%r8d,%r18d");
+ asm volatile("adcx (%r15,%r31,1),%r8");
+ asm volatile("adcx (%r15,%r31,1),%r8d,%r18d");
+ asm volatile("add $0x1234,%ax,%r30w");
+ asm volatile("add $0x12344433,%r15,%r16");
+ asm volatile("add $0x34,%r13b,%r17b");
+ asm volatile("add $0xfffffffff4332211,%rax,%r8");
+ asm volatile("add %r31,%r8,%r16");
+ asm volatile("add %r31,(%r8),%r16");
+ asm volatile("add %r31,(%r8,%r16,8),%r16");
+ asm volatile("add %r31b,%r8b,%r16b");
+ asm volatile("add %r31d,%r8d,%r16d");
+ asm volatile("add %r31w,%r8w,%r16w");
+ asm volatile("add (%r31),%r8,%r16");
+ asm volatile("add 0x9090(%r31,%r16,1),%r8,%r16");
+ asm volatile("addb %r31b,%r8b,%r16b");
+ asm volatile("addl %r31d,%r8d,%r16d");
+ asm volatile("addl $0x11,(%r19,%rax,4),%r20d");
+ asm volatile("addq %r31,%r8,%r16");
+ asm volatile("addq $0x12344433,(%r15,%rcx,4),%r16");
+ asm volatile("addw %r31w,%r8w,%r16w");
+ asm volatile("adox %r15d,%r8d,%r18d");
+ asm volatile("{load} add %r31,%r8,%r16");
+ asm volatile("{store} add %r31,%r8,%r16");
+ asm volatile("adox (%r15,%r31,1),%r8");
+ asm volatile("adox (%r15,%r31,1),%r8d,%r18d");
+ asm volatile("and $0x1234,%ax,%r30w");
+ asm volatile("and %r15b,%r17b,%r18b");
+ asm volatile("and %r15d,(%r8),%r18d");
+ asm volatile("and (%r15,%rax,1),%r16b,%r8b");
+ asm volatile("and (%r15,%rax,1),%r16w,%r8w");
+ asm volatile("andl $0x11,(%r19,%rax,4),%r20d");
+ asm volatile("cmova 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovae 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovb 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovbe 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmove 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovg 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovge 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovl 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovle 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovne 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovno 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovnp 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovns 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovo 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovp 0x90909090(%eax),%edx,%r8d");
+ asm volatile("cmovs 0x90909090(%eax),%edx,%r8d");
+ asm volatile("dec %rax,%r17");
+ asm volatile("decb (%r31,%r12,1),%r8b");
+ asm volatile("imul 0x909(%rax,%r31,8),%rdx,%r25");
+ asm volatile("imul 0x90909(%eax),%edx,%r8d");
+ asm volatile("inc %r31,%r16");
+ asm volatile("inc %r31,%r8");
+ asm volatile("inc %rax,%rbx");
+ asm volatile("neg %rax,%r17");
+ asm volatile("negb (%r31,%r12,1),%r8b");
+ asm volatile("not %rax,%r17");
+ asm volatile("notb (%r31,%r12,1),%r8b");
+ asm volatile("or $0x1234,%ax,%r30w");
+ asm volatile("or %r15b,%r17b,%r18b");
+ asm volatile("or %r15d,(%r8),%r18d");
+ asm volatile("or (%r15,%rax,1),%r16b,%r8b");
+ asm volatile("or (%r15,%rax,1),%r16w,%r8w");
+ asm volatile("orl $0x11,(%r19,%rax,4),%r20d");
+ asm volatile("rcl $0x2,%r12b,%r31b");
+ asm volatile("rcl %cl,%r16b,%r8b");
+ asm volatile("rclb $0x1,(%rax),%r31b");
+ asm volatile("rcll $0x2,(%rax),%r31d");
+ asm volatile("rclw $0x1,(%rax),%r31w");
+ asm volatile("rclw %cl,(%r19,%rax,4),%r31w");
+ asm volatile("rcr $0x2,%r12b,%r31b");
+ asm volatile("rcr %cl,%r16b,%r8b");
+ asm volatile("rcrb $0x1,(%rax),%r31b");
+ asm volatile("rcrl $0x2,(%rax),%r31d");
+ asm volatile("rcrw $0x1,(%rax),%r31w");
+ asm volatile("rcrw %cl,(%r19,%rax,4),%r31w");
+ asm volatile("rol $0x2,%r12b,%r31b");
+ asm volatile("rol %cl,%r16b,%r8b");
+ asm volatile("rolb $0x1,(%rax),%r31b");
+ asm volatile("roll $0x2,(%rax),%r31d");
+ asm volatile("rolw $0x1,(%rax),%r31w");
+ asm volatile("rolw %cl,(%r19,%rax,4),%r31w");
+ asm volatile("ror $0x2,%r12b,%r31b");
+ asm volatile("ror %cl,%r16b,%r8b");
+ asm volatile("rorb $0x1,(%rax),%r31b");
+ asm volatile("rorl $0x2,(%rax),%r31d");
+ asm volatile("rorw $0x1,(%rax),%r31w");
+ asm volatile("rorw %cl,(%r19,%rax,4),%r31w");
+ asm volatile("sar $0x2,%r12b,%r31b");
+ asm volatile("sar %cl,%r16b,%r8b");
+ asm volatile("sarb $0x1,(%rax),%r31b");
+ asm volatile("sarl $0x2,(%rax),%r31d");
+ asm volatile("sarw $0x1,(%rax),%r31w");
+ asm volatile("sarw %cl,(%r19,%rax,4),%r31w");
+ asm volatile("sbb $0x1234,%ax,%r30w");
+ asm volatile("sbb %r15b,%r17b,%r18b");
+ asm volatile("sbb %r15d,(%r8),%r18d");
+ asm volatile("sbb (%r15,%rax,1),%r16b,%r8b");
+ asm volatile("sbb (%r15,%rax,1),%r16w,%r8w");
+ asm volatile("sbbl $0x11,(%r19,%rax,4),%r20d");
+ asm volatile("shl $0x2,%r12b,%r31b");
+ asm volatile("shl $0x2,%r12b,%r31b");
+ asm volatile("shl %cl,%r16b,%r8b");
+ asm volatile("shl %cl,%r16b,%r8b");
+ asm volatile("shlb $0x1,(%rax),%r31b");
+ asm volatile("shlb $0x1,(%rax),%r31b");
+ asm volatile("shld $0x1,%r12,(%rax),%r31");
+ asm volatile("shld $0x2,%r15d,(%rax),%r31d");
+ asm volatile("shld $0x2,%r8w,%r12w,%r31w");
+ asm volatile("shld %cl,%r12,%r16,%r8");
+ asm volatile("shld %cl,%r13w,(%r19,%rax,4),%r31w");
+ asm volatile("shld %cl,%r9w,(%rax),%r31w");
+ asm volatile("shll $0x2,(%rax),%r31d");
+ asm volatile("shll $0x2,(%rax),%r31d");
+ asm volatile("shlw $0x1,(%rax),%r31w");
+ asm volatile("shlw $0x1,(%rax),%r31w");
+ asm volatile("shlw %cl,(%r19,%rax,4),%r31w");
+ asm volatile("shlw %cl,(%r19,%rax,4),%r31w");
+ asm volatile("shr $0x2,%r12b,%r31b");
+ asm volatile("shr %cl,%r16b,%r8b");
+ asm volatile("shrb $0x1,(%rax),%r31b");
+ asm volatile("shrd $0x1,%r12,(%rax),%r31");
+ asm volatile("shrd $0x2,%r15d,(%rax),%r31d");
+ asm volatile("shrd $0x2,%r8w,%r12w,%r31w");
+ asm volatile("shrd %cl,%r12,%r16,%r8");
+ asm volatile("shrd %cl,%r13w,(%r19,%rax,4),%r31w");
+ asm volatile("shrd %cl,%r9w,(%rax),%r31w");
+ asm volatile("shrl $0x2,(%rax),%r31d");
+ asm volatile("shrw $0x1,(%rax),%r31w");
+ asm volatile("shrw %cl,(%r19,%rax,4),%r31w");
+ asm volatile("sub $0x1234,%ax,%r30w");
+ asm volatile("sub %r15b,%r17b,%r18b");
+ asm volatile("sub %r15d,(%r8),%r18d");
+ asm volatile("sub (%r15,%rax,1),%r16b,%r8b");
+ asm volatile("sub (%r15,%rax,1),%r16w,%r8w");
+ asm volatile("subl $0x11,(%r19,%rax,4),%r20d");
+ asm volatile("xor $0x1234,%ax,%r30w");
+ asm volatile("xor %r15b,%r17b,%r18b");
+ asm volatile("xor %r15d,(%r8),%r18d");
+ asm volatile("xor (%r15,%rax,1),%r16b,%r8b");
+ asm volatile("xor (%r15,%rax,1),%r16w,%r8w");
+ asm volatile("xorl $0x11,(%r19,%rax,4),%r20d");
+
+ /* APX suppress status flags */
+
+ asm volatile("{nf} add %bl,%dl,%r8b");
+ asm volatile("{nf} add %dx,%ax,%r9w");
+ asm volatile("{nf} add 0x123(%r8,%rax,4),%bl,%dl");
+ asm volatile("{nf} add 0x123(%r8,%rax,4),%dx,%ax");
+ asm volatile("{nf} or %bl,%dl,%r8b");
+ asm volatile("{nf} or %dx,%ax,%r9w");
+ asm volatile("{nf} or 0x123(%r8,%rax,4),%bl,%dl");
+ asm volatile("{nf} or 0x123(%r8,%rax,4),%dx,%ax");
+ asm volatile("{nf} and %bl,%dl,%r8b");
+ asm volatile("{nf} and %dx,%ax,%r9w");
+ asm volatile("{nf} and 0x123(%r8,%rax,4),%bl,%dl");
+ asm volatile("{nf} and 0x123(%r8,%rax,4),%dx,%ax");
+ asm volatile("{nf} shld $0x7b,%dx,%ax,%r9w");
+ asm volatile("{nf} sub %bl,%dl,%r8b");
+ asm volatile("{nf} sub %dx,%ax,%r9w");
+ asm volatile("{nf} sub 0x123(%r8,%rax,4),%bl,%dl");
+ asm volatile("{nf} sub 0x123(%r8,%rax,4),%dx,%ax");
+ asm volatile("{nf} shrd $0x7b,%dx,%ax,%r9w");
+ asm volatile("{nf} xor %bl,%dl,%r8b");
+ asm volatile("{nf} xor %r31,%r31");
+ asm volatile("{nf} xor 0x123(%r8,%rax,4),%bl,%dl");
+ asm volatile("{nf} xor 0x123(%r8,%rax,4),%dx,%ax");
+ asm volatile("{nf} imul $0xff90,%r9,%r15");
+ asm volatile("{nf} imul $0x7b,%r9,%r15");
+ asm volatile("{nf} xor $0x7b,%bl,%dl");
+ asm volatile("{nf} xor $0x7b,%dx,%ax");
+ asm volatile("{nf} popcnt %r9,%r31");
+ asm volatile("{nf} shld %cl,%dx,%ax,%r9w");
+ asm volatile("{nf} shrd %cl,%dx,%ax,%r9w");
+ asm volatile("{nf} imul %r9,%r31,%r11");
+ asm volatile("{nf} sar $0x7b,%bl,%dl");
+ asm volatile("{nf} sar $0x7b,%dx,%ax");
+ asm volatile("{nf} sar $1,%bl,%dl");
+ asm volatile("{nf} sar $1,%dx,%ax");
+ asm volatile("{nf} sar %cl,%bl,%dl");
+ asm volatile("{nf} sar %cl,%dx,%ax");
+ asm volatile("{nf} andn %r9,%r31,%r11");
+ asm volatile("{nf} blsi %r9,%r31");
+ asm volatile("{nf} tzcnt %r9,%r31");
+ asm volatile("{nf} lzcnt %r9,%r31");
+ asm volatile("{nf} idiv %bl");
+ asm volatile("{nf} idiv %dx");
+ asm volatile("{nf} dec %bl,%dl");
+ asm volatile("{nf} dec %dx,%ax");
+
#else /* #ifdef __x86_64__ */
/* bound r32, mem (same op code as EVEX prefix) */
@@ -4848,6 +5354,97 @@ int main(void)
#endif /* #ifndef __x86_64__ */
+ /* Key Locker */
+
+ asm volatile(" loadiwkey %xmm1, %xmm2");
+ asm volatile(" encodekey128 %eax, %edx");
+ asm volatile(" encodekey256 %eax, %edx");
+ asm volatile(" aesenc128kl 0x77(%edx), %xmm3");
+ asm volatile(" aesenc256kl 0x77(%edx), %xmm3");
+ asm volatile(" aesdec128kl 0x77(%edx), %xmm3");
+ asm volatile(" aesdec256kl 0x77(%edx), %xmm3");
+ asm volatile(" aesencwide128kl 0x77(%edx)");
+ asm volatile(" aesencwide256kl 0x77(%edx)");
+ asm volatile(" aesdecwide128kl 0x77(%edx)");
+ asm volatile(" aesdecwide256kl 0x77(%edx)");
+
+ /* Remote Atomic Operations */
+
+ asm volatile("aadd %ecx,(%eax)");
+ asm volatile("aadd %edx,(0x12345678)");
+ asm volatile("aadd %edx,0x12345678(%eax,%ecx,8)");
+
+ asm volatile("aand %ecx,(%eax)");
+ asm volatile("aand %edx,(0x12345678)");
+ asm volatile("aand %edx,0x12345678(%eax,%ecx,8)");
+
+ asm volatile("aor %ecx,(%eax)");
+ asm volatile("aor %edx,(0x12345678)");
+ asm volatile("aor %edx,0x12345678(%eax,%ecx,8)");
+
+ asm volatile("axor %ecx,(%eax)");
+ asm volatile("axor %edx,(0x12345678)");
+ asm volatile("axor %edx,0x12345678(%eax,%ecx,8)");
+
+ /* AVX NE Convert */
+
+ asm volatile("vbcstnebf162ps (%ecx),%xmm6");
+ asm volatile("vbcstnesh2ps (%ecx),%xmm6");
+ asm volatile("vcvtneebf162ps (%ecx),%xmm6");
+ asm volatile("vcvtneeph2ps (%ecx),%xmm6");
+ asm volatile("vcvtneobf162ps (%ecx),%xmm6");
+ asm volatile("vcvtneoph2ps (%ecx),%xmm6");
+ asm volatile("vcvtneps2bf16 %xmm1,%xmm6");
+
+ /* AVX VNNI INT16 */
+
+ asm volatile("vpdpbssd %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpbssds %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpbsud %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpbsuds %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpbuud %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpbuuds %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpwsud %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpwsuds %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpwusd %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpwusds %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpwuud %xmm1,%xmm2,%xmm3");
+ asm volatile("vpdpwuuds %xmm1,%xmm2,%xmm3");
+
+ /* AVX IFMA */
+
+ asm volatile("vpmadd52huq %xmm1,%xmm2,%xmm3");
+ asm volatile("vpmadd52luq %xmm1,%xmm2,%xmm3");
+
+ /* AVX SHA512 */
+
+ asm volatile("vsha512msg1 %xmm1,%ymm2");
+ asm volatile("vsha512msg2 %ymm1,%ymm2");
+ asm volatile("vsha512rnds2 %xmm1,%ymm2,%ymm3");
+
+ /* AVX SM3 */
+
+ asm volatile("vsm3msg1 %xmm1,%xmm2,%xmm3");
+ asm volatile("vsm3msg2 %xmm1,%xmm2,%xmm3");
+ asm volatile("vsm3rnds2 $0xa1,%xmm1,%xmm2,%xmm3");
+
+ /* AVX SM4 */
+
+ asm volatile("vsm4key4 %xmm1,%xmm2,%xmm3");
+ asm volatile("vsm4rnds4 %xmm1,%xmm2,%xmm3");
+
+ /* Pre-fetch */
+
+ asm volatile("prefetch (%eax)");
+ asm volatile("prefetcht0 (%eax)");
+ asm volatile("prefetcht1 (%eax)");
+ asm volatile("prefetcht2 (%eax)");
+ asm volatile("prefetchnta (%eax)");
+
+ /* Non-serializing write MSR */
+
+ asm volatile("wrmsrns");
+
/* Prediction history reset */
asm volatile("hreset $0");
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 005907cb97d8..2607ed5c4296 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -1,24 +1,24 @@
-perf-y += header.o
-perf-y += tsc.o
-perf-y += pmu.o
-perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
-perf-y += perf_regs.o
-perf-y += topdown.o
-perf-y += machine.o
-perf-y += event.o
-perf-y += evlist.o
-perf-y += mem-events.o
-perf-y += evsel.o
-perf-y += iostat.o
-perf-y += env.o
+perf-util-y += header.o
+perf-util-y += tsc.o
+perf-util-y += pmu.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+perf-util-y += perf_regs.o
+perf-util-y += topdown.o
+perf-util-y += machine.o
+perf-util-y += event.o
+perf-util-y += evlist.o
+perf-util-y += mem-events.o
+perf-util-y += evsel.o
+perf-util-y += iostat.o
+perf-util-y += env.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
-perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-$(CONFIG_AUXTRACE) += auxtrace.o
-perf-$(CONFIG_AUXTRACE) += archinsn.o
-perf-$(CONFIG_AUXTRACE) += intel-pt.o
-perf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-util-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-util-$(CONFIG_AUXTRACE) += archinsn.o
+perf-util-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-util-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 6de7e2d21075..4b710e875953 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -32,6 +32,7 @@
#include "../../../util/tsc.h"
#include <internal/lib.h> // page_size
#include "../../../util/intel-pt.h"
+#include <api/fs/fs.h>
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -428,6 +429,16 @@ static int intel_pt_track_switches(struct evlist *evlist)
}
#endif
+static bool intel_pt_exclude_guest(void)
+{
+ int pt_mode;
+
+ if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
+ pt_mode = 0;
+
+ return pt_mode == 1;
+}
+
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
unsigned int val, last = 0, state = 1;
@@ -620,6 +631,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
+ evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
evsel->no_aux_samples = true;
evsel->needs_auxtrace_mmap = true;
intel_pt_evsel = evsel;
@@ -758,7 +770,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
}
if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
- u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;
+ size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
+ u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;
intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
}
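The intel-pt.c change above does two things: the AUX watermark is now computed in size_t and clamped, since auxtrace_mmap_pages * page_size / 4 can exceed what the u32 attr field can hold for very large buffers, and exclude_guest is set whenever the kvm_intel pt_mode module parameter reads as 1. A minimal standalone sketch of the clamp follows; the buffer sizes used are arbitrary, not values from the patch.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the clamp in the hunk above; pages/page_size values are made up. */
static uint32_t aux_watermark(size_t pages, size_t page_size)
{
	size_t aw = pages * page_size / 4;

	return aw > UINT_MAX ? UINT_MAX : (uint32_t)aw;
}

int main(void)
{
	/* 8M pages of 4KiB is a 32GiB buffer: the watermark saturates at UINT_MAX. */
	printf("%u\n", aux_watermark(8UL << 20, 4096));
	/* A 64-page buffer stays well in range: 65536. */
	printf("%u\n", aux_watermark(64, 4096));
	return 0;
}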
diff --git a/tools/perf/arch/xtensa/Build b/tools/perf/arch/xtensa/Build
index e4e5f33c84d8..e63eabc2c8f4 100644
--- a/tools/perf/arch/xtensa/Build
+++ b/tools/perf/arch/xtensa/Build
@@ -1 +1 @@
-perf-y += util/
+perf-util-y += util/
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index c2ab30907ae7..279ab2ab4abe 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -1,25 +1,25 @@
-perf-y += sched-messaging.o
-perf-y += sched-pipe.o
-perf-y += sched-seccomp-notify.o
-perf-y += syscall.o
-perf-y += mem-functions.o
-perf-y += futex-hash.o
-perf-y += futex-wake.o
-perf-y += futex-wake-parallel.o
-perf-y += futex-requeue.o
-perf-y += futex-lock-pi.o
-perf-y += epoll-wait.o
-perf-y += epoll-ctl.o
-perf-y += synthesize.o
-perf-y += kallsyms-parse.o
-perf-y += find-bit-bench.o
-perf-y += inject-buildid.o
-perf-y += evlist-open-close.o
-perf-y += breakpoint.o
-perf-y += pmu-scan.o
-perf-y += uprobe.o
+perf-bench-y += sched-messaging.o
+perf-bench-y += sched-pipe.o
+perf-bench-y += sched-seccomp-notify.o
+perf-bench-y += syscall.o
+perf-bench-y += mem-functions.o
+perf-bench-y += futex-hash.o
+perf-bench-y += futex-wake.o
+perf-bench-y += futex-wake-parallel.o
+perf-bench-y += futex-requeue.o
+perf-bench-y += futex-lock-pi.o
+perf-bench-y += epoll-wait.o
+perf-bench-y += epoll-ctl.o
+perf-bench-y += synthesize.o
+perf-bench-y += kallsyms-parse.o
+perf-bench-y += find-bit-bench.o
+perf-bench-y += inject-buildid.o
+perf-bench-y += evlist-open-close.o
+perf-bench-y += breakpoint.o
+perf-bench-y += pmu-scan.o
+perf-bench-y += uprobe.o
-perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
-perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
+perf-bench-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
+perf-bench-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
-perf-$(CONFIG_NUMA) += numa.o
+perf-bench-$(CONFIG_NUMA) += numa.o
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index d3db73dac66a..d66d852b90e4 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -232,7 +232,7 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
if (!noaffinity)
pthread_attr_init(&thread_attr);
- nrcpus = perf_cpu_map__nr(cpu);
+ nrcpus = cpu__max_cpu().cpu;
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index 06bb3187660a..ef5c4257844d 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -309,7 +309,7 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
if (!noaffinity)
pthread_attr_init(&thread_attr);
- nrcpus = perf_cpu_map__nr(cpu);
+ nrcpus = cpu__max_cpu().cpu;
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index 0c69d20efa32..b472eded521b 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -174,7 +174,7 @@ int bench_futex_hash(int argc, const char **argv)
pthread_attr_init(&thread_attr);
gettimeofday(&bench__start, NULL);
- nrcpus = perf_cpu_map__nr(cpu);
+ nrcpus = cpu__max_cpu().cpu;
cpuset = CPU_ALLOC(nrcpus);
BUG_ON(!cpuset);
size = CPU_ALLOC_SIZE(nrcpus);
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 7a4973346180..0416120c091b 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -122,7 +122,7 @@ static void create_threads(struct worker *w, struct perf_cpu_map *cpu)
{
cpu_set_t *cpuset;
unsigned int i;
- int nrcpus = perf_cpu_map__nr(cpu);
+ int nrcpus = cpu__max_cpu().cpu;
size_t size;
threads_starting = params.nthreads;
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index d9ad736c1a3e..aad5bfc4fe18 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -125,7 +125,7 @@ static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
{
cpu_set_t *cpuset;
unsigned int i;
- int nrcpus = perf_cpu_map__nr(cpu);
+ int nrcpus = cpu__max_cpu().cpu;
size_t size;
threads_starting = params.nthreads;
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index b66df553e561..4352e318631e 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -149,7 +149,7 @@ static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
{
cpu_set_t *cpuset;
unsigned int i;
- int nrcpus = perf_cpu_map__nr(cpu);
+ int nrcpus = cpu__max_cpu().cpu;
size_t size;
threads_starting = params.nthreads;
@@ -318,7 +318,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
cond_broadcast(&thread_worker);
mutex_unlock(&thread_lock);
- usleep(100000);
+ usleep(200000);
/* Ok, all threads are patiently blocked, start waking folks up */
wakeup_threads(waking_worker);
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 690fd6d3da13..49b3c89b0b35 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -100,7 +100,7 @@ static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
cpu_set_t *cpuset;
unsigned int i;
size_t size;
- int nrcpus = perf_cpu_map__nr(cpu);
+ int nrcpus = cpu__max_cpu().cpu;
threads_starting = params.nthreads;
cpuset = CPU_ALLOC(nrcpus);
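The bench hunks above size the cpu_set_t by cpu__max_cpu().cpu rather than by the number of entries in the perf_cpu_map: when the map is sparse, a mask allocated for perf_cpu_map__nr(cpu) bits has no room for CPU numbers larger than that count. A standalone sketch of the sizing issue using the glibc CPU_* macros; the CPU numbers are arbitrary, not taken from the patch.

/*
 * Sketch of why the mask is sized by the highest possible CPU number rather
 * than the map's element count: a sparse "map" such as {0, 71} cannot be
 * represented in a set allocated for only 2 CPUs.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int cpus[] = { 0, 71 };	/* sparse "map": two entries, highest CPU is 71 */
	int nr_in_map = 2;
	int max_cpu = 72;	/* highest possible CPU number + 1 */
	cpu_set_t *set;
	size_t size;

	/* Sizing by the map's length gives too few bits for CPU 71 ... */
	printf("set sized for %d cpus holds %zu bits\n",
	       nr_in_map, 8 * CPU_ALLOC_SIZE(nr_in_map));

	/* ... so allocate for the largest possible CPU index instead. */
	set = CPU_ALLOC(max_cpu);
	if (!set)
		return 1;
	size = CPU_ALLOC_SIZE(max_cpu);
	CPU_ZERO_S(size, set);
	for (int i = 0; i < nr_in_map; i++)
		CPU_SET_S(cpus[i], size, set);
	printf("cpu 71 set: %d\n", CPU_ISSET_S(71, size, set) ? 1 : 0);
	CPU_FREE(set);
	return 0;
}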
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 50d2fb222d48..b10b7f005658 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -571,8 +571,8 @@ static int __cmd_annotate(struct perf_annotate *ann)
goto out;
if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout, false);
- evlist__fprintf_nr_events(session->evlist, stdout, false);
+ perf_session__fprintf_nr_events(session, stdout);
+ evlist__fprintf_nr_events(session->evlist, stdout);
goto out;
}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 5cab31231551..82cb4b1010aa 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -162,7 +162,11 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
} else
fputc('\n', fp);
- if (desc && print_state->desc) {
+ if (long_desc && print_state->long_desc) {
+ fprintf(fp, "%*s", 8, "[");
+ wordwrap(fp, long_desc, 8, pager_get_columns(), 0);
+ fprintf(fp, "]\n");
+ } else if (desc && print_state->desc) {
char *desc_with_unit = NULL;
int desc_len = -1;
@@ -178,12 +182,6 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
fprintf(fp, "]\n");
free(desc_with_unit);
}
- long_desc = long_desc ?: desc;
- if (long_desc && print_state->long_desc) {
- fprintf(fp, "%*s", 8, "[");
- wordwrap(fp, long_desc, 8, pager_get_columns(), 0);
- fprintf(fp, "]\n");
- }
if (print_state->detailed && encoding_desc) {
fprintf(fp, "%*s", 8, "");
@@ -256,15 +254,14 @@ static void default_print_metric(void *ps,
}
fprintf(fp, " %s\n", name);
- if (desc && print_state->desc) {
- fprintf(fp, "%*s", 8, "[");
- wordwrap(fp, desc, 8, pager_get_columns(), 0);
- fprintf(fp, "]\n");
- }
if (long_desc && print_state->long_desc) {
fprintf(fp, "%*s", 8, "[");
wordwrap(fp, long_desc, 8, pager_get_columns(), 0);
fprintf(fp, "]\n");
+ } else if (desc && print_state->desc) {
+ fprintf(fp, "%*s", 8, "[");
+ wordwrap(fp, desc, 8, pager_get_columns(), 0);
+ fprintf(fp, "]\n");
}
if (expr && print_state->detailed) {
fprintf(fp, "%*s", 8, "[");
@@ -507,6 +504,7 @@ int cmd_list(int argc, const char **argv)
int i, ret = 0;
struct print_state default_ps = {
.fp = stdout,
+ .desc = true,
};
struct print_state json_ps = {
.fp = stdout,
@@ -579,7 +577,6 @@ int cmd_list(int argc, const char **argv)
};
ps = &json_ps;
} else {
- default_ps.desc = !default_ps.long_desc;
default_ps.last_topic = strdup("");
assert(default_ps.last_topic);
default_ps.visited_metrics = strlist__new(NULL, NULL);
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 7007d26fe654..0253184b3b58 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1477,20 +1477,16 @@ static void dump_map(void)
fprintf(lock_output, " %#llx: %s\n", (unsigned long long)st->addr, st->name);
}
-static int dump_info(void)
+static void dump_info(void)
{
- int rc = 0;
-
if (info_threads)
dump_threads();
- else if (info_map)
+
+ if (info_map) {
+ if (info_threads)
+ fputc('\n', lock_output);
dump_map();
- else {
- rc = -1;
- pr_err("Unknown type of information\n");
}
-
- return rc;
}
static const struct evsel_str_handler lock_tracepoints[] = {
@@ -1992,7 +1988,7 @@ static int __cmd_report(bool display_info)
setup_pager();
if (display_info) /* used for info subcommand */
- err = dump_info();
+ dump_info();
else {
combine_result();
sort_result();
@@ -2568,9 +2564,9 @@ int cmd_lock(int argc, const char **argv)
const struct option info_options[] = {
OPT_BOOLEAN('t', "threads", &info_threads,
- "dump thread list in perf.data"),
+ "dump the thread list in perf.data"),
OPT_BOOLEAN('m', "map", &info_map,
- "map of lock instances (address:name table)"),
+ "dump the map of lock instances (address:name table)"),
OPT_PARENT(lock_options)
};
@@ -2684,6 +2680,13 @@ int cmd_lock(int argc, const char **argv)
if (argc)
usage_with_options(info_usage, info_options);
}
+
+ /* If neither threads nor map requested, display both */
+ if (!info_threads && !info_map) {
+ info_threads = true;
+ info_map = true;
+ }
+
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
rc = __cmd_report(true);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0a8ba1323d64..a94516e8c522 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1926,7 +1926,7 @@ static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
static void record__read_lost_samples(struct record *rec)
{
struct perf_session *session = rec->session;
- struct perf_record_lost_samples *lost = NULL;
+ struct perf_record_lost_samples_and_ids lost;
struct evsel *evsel;
/* there was an error during record__open */
@@ -1951,19 +1951,13 @@ static void record__read_lost_samples(struct record *rec)
if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
pr_debug("read LOST count failed\n");
- goto out;
+ return;
}
if (count.lost) {
- if (!lost) {
- lost = zalloc(PERF_SAMPLE_MAX_SIZE);
- if (!lost) {
- pr_debug("Memory allocation failed\n");
- return;
- }
- lost->header.type = PERF_RECORD_LOST_SAMPLES;
- }
- __record__save_lost_samples(rec, evsel, lost,
+ memset(&lost, 0, sizeof(lost));
+ lost.lost.header.type = PERF_RECORD_LOST_SAMPLES;
+ __record__save_lost_samples(rec, evsel, &lost.lost,
x, y, count.lost, 0);
}
}
@@ -1971,20 +1965,12 @@ static void record__read_lost_samples(struct record *rec)
lost_count = perf_bpf_filter__lost_count(evsel);
if (lost_count) {
- if (!lost) {
- lost = zalloc(PERF_SAMPLE_MAX_SIZE);
- if (!lost) {
- pr_debug("Memory allocation failed\n");
- return;
- }
- lost->header.type = PERF_RECORD_LOST_SAMPLES;
- }
- __record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
+ memset(&lost, 0, sizeof(lost));
+ lost.lost.header.type = PERF_RECORD_LOST_SAMPLES;
+ __record__save_lost_samples(rec, evsel, &lost.lost, 0, 0, lost_count,
PERF_RECORD_MISC_LOST_SAMPLES_BPF);
}
}
-out:
- free(lost);
}
static volatile sig_atomic_t workload_exec_errno;
@@ -3196,7 +3182,7 @@ static int switch_output_setup(struct record *rec)
unsigned long val;
/*
- * If we're using --switch-output-events, then we imply its
+ * If we're using --switch-output-events, then we imply its
* --switch-output=signal, as we'll send a SIGUSR2 from the side band
* thread to its parent.
*/
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 69618fb0110b..6edc0d4ce6fb 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -810,8 +810,8 @@ static int stats_print(struct report *rep)
{
struct perf_session *session = rep->session;
- perf_session__fprintf_nr_events(session, stdout, rep->skip_empty);
- evlist__fprintf_nr_events(session->evlist, stdout, rep->skip_empty);
+ perf_session__fprintf_nr_events(session, stdout);
+ evlist__fprintf_nr_events(session->evlist, stdout);
return 0;
}
@@ -1089,10 +1089,7 @@ static int __cmd_report(struct report *rep)
perf_session__fprintf_dsos(session, stdout);
if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout,
- rep->skip_empty);
- evlist__fprintf_nr_events(session->evlist, stdout,
- rep->skip_empty);
+ stats_print(rep);
return 0;
}
}
@@ -1562,6 +1559,8 @@ int cmd_report(int argc, const char **argv)
data.path = input_name;
data.force = symbol_conf.force;
+ symbol_conf.skip_empty = report.skip_empty;
+
repeat:
session = perf_session__new(&data, &report.tool);
if (IS_ERR(session)) {
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5977c49ae2c7..8750b5f2d49b 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -156,6 +156,9 @@ struct perf_sched_map {
const char *color_pids_str;
struct perf_cpu_map *color_cpus;
const char *color_cpus_str;
+ const char *task_name;
+ struct strlist *task_names;
+ bool fuzzy;
struct perf_cpu_map *cpus;
const char *cpus_str;
};
@@ -177,6 +180,7 @@ struct perf_sched {
struct perf_cpu max_cpu;
u32 *curr_pid;
struct thread **curr_thread;
+ struct thread **curr_out_thread;
char next_shortname1;
char next_shortname2;
unsigned int replay_repeat;
@@ -1538,23 +1542,91 @@ map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid
return thread;
}
+static bool sched_match_task(struct perf_sched *sched, const char *comm_str)
+{
+ bool fuzzy_match = sched->map.fuzzy;
+ struct strlist *task_names = sched->map.task_names;
+ struct str_node *node;
+
+ strlist__for_each_entry(node, task_names) {
+ bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) :
+ !strcmp(comm_str, node->s);
+ if (match_found)
+ return true;
+ }
+
+ return false;
+}
+
+static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,
+ const char *color, bool sched_out)
+{
+ for (int i = 0; i < cpus_nr; i++) {
+ struct perf_cpu cpu = {
+ .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
+ };
+ struct thread *curr_thread = sched->curr_thread[cpu.cpu];
+ struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
+ struct thread_runtime *curr_tr;
+ const char *pid_color = color;
+ const char *cpu_color = color;
+ char symbol = ' ';
+ struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread;
+
+ if (thread_to_check && thread__has_color(thread_to_check))
+ pid_color = COLOR_PIDS;
+
+ if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
+ cpu_color = COLOR_CPUS;
+
+ if (cpu.cpu == this_cpu.cpu)
+ symbol = '*';
+
+ color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);
+
+ thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
+ sched->curr_thread[cpu.cpu];
+
+ if (thread_to_check) {
+ curr_tr = thread__get_runtime(thread_to_check);
+ if (curr_tr == NULL)
+ return;
+
+ if (sched_out) {
+ if (cpu.cpu == this_cpu.cpu)
+ color_fprintf(stdout, color, "- ");
+ else {
+ curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
+ if (curr_tr != NULL)
+ color_fprintf(stdout, pid_color, "%2s ",
+ curr_tr->shortname);
+ }
+ } else
+ color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
+ } else
+ color_fprintf(stdout, color, " ");
+ }
+}
+
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine)
{
const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
- struct thread *sched_in;
+ const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid");
+ struct thread *sched_in, *sched_out;
struct thread_runtime *tr;
int new_shortname;
u64 timestamp0, timestamp = sample->time;
s64 delta;
- int i;
struct perf_cpu this_cpu = {
.cpu = sample->cpu,
};
int cpus_nr;
+ int proceed;
bool new_cpu = false;
const char *color = PERF_COLOR_NORMAL;
char stimestamp[32];
+ const char *str;
BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
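
sched_match_task() above backs the new --task-name filter: with --fuzzy-name a
substring hit in the comm is enough, otherwise the comm must equal one of the
given names exactly. A tiny standalone illustration of the same comparison
(comm_matches() is illustrative, not part of perf):

    #include <stdbool.h>
    #include <string.h>

    /* Mirrors the comparison in sched_match_task(): fuzzy -> substring,
     * otherwise exact match of the whole comm string. */
    static bool comm_matches(const char *comm, const char *name, bool fuzzy)
    {
            return fuzzy ? strstr(comm, name) != NULL
                         : strcmp(comm, name) == 0;
    }

    /* comm_matches("kworker/0:1", "kworker", true)  -> true
     * comm_matches("kworker/0:1", "kworker", false) -> false */
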
@@ -1583,7 +1655,8 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
}
sched_in = map__findnew_thread(sched, machine, -1, next_pid);
- if (sched_in == NULL)
+ sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
+ if (sched_in == NULL || sched_out == NULL)
return -1;
tr = thread__get_runtime(sched_in);
@@ -1593,9 +1666,9 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
}
sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
+ sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
- printf(" ");
-
+ str = thread__comm_str(sched_in);
new_shortname = 0;
if (!tr->shortname[0]) {
if (!strcmp(thread__comm_str(sched_in), "swapper")) {
@@ -1605,7 +1678,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
*/
tr->shortname[0] = '.';
tr->shortname[1] = ' ';
- } else {
+ } else if (!sched->map.task_name || sched_match_task(sched, str)) {
tr->shortname[0] = sched->next_shortname1;
tr->shortname[1] = sched->next_shortname2;
@@ -1618,46 +1691,37 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
else
sched->next_shortname2 = '0';
}
+ } else {
+ tr->shortname[0] = '-';
+ tr->shortname[1] = ' ';
}
new_shortname = 1;
}
- for (i = 0; i < cpus_nr; i++) {
- struct perf_cpu cpu = {
- .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
- };
- struct thread *curr_thread = sched->curr_thread[cpu.cpu];
- struct thread_runtime *curr_tr;
- const char *pid_color = color;
- const char *cpu_color = color;
-
- if (curr_thread && thread__has_color(curr_thread))
- pid_color = COLOR_PIDS;
-
- if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu))
- continue;
-
- if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
- cpu_color = COLOR_CPUS;
+ if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
+ goto out;
- if (cpu.cpu != this_cpu.cpu)
- color_fprintf(stdout, color, " ");
+ proceed = 0;
+ str = thread__comm_str(sched_in);
+ /*
+ * Check which of sched_in and sched_out matches the passed --task-name
+ * arguments and call the corresponding print_sched_map.
+ */
+ if (sched->map.task_name && !sched_match_task(sched, str)) {
+ if (!sched_match_task(sched, thread__comm_str(sched_out)))
+ goto out;
else
- color_fprintf(stdout, cpu_color, "*");
+ goto sched_out;
- if (sched->curr_thread[cpu.cpu]) {
- curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
- if (curr_tr == NULL) {
- thread__put(sched_in);
- return -1;
- }
- color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
- } else
- color_fprintf(stdout, color, " ");
+ } else {
+ str = thread__comm_str(sched_out);
+ if (!(sched->map.task_name && !sched_match_task(sched, str)))
+ proceed = 1;
}
- if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
- goto out;
+ printf(" ");
+
+ print_sched_map(sched, this_cpu, cpus_nr, color, false);
timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
color_fprintf(stdout, color, " %12s secs ", stimestamp);
@@ -1675,9 +1739,32 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
if (sched->map.comp && new_cpu)
color_fprintf(stdout, color, " (CPU %d)", this_cpu);
-out:
+ if (proceed != 1) {
+ color_fprintf(stdout, color, "\n");
+ goto out;
+ }
+
+sched_out:
+ if (sched->map.task_name) {
+ tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
+ if (strcmp(tr->shortname, "") == 0)
+ goto out;
+
+ if (proceed == 1)
+ color_fprintf(stdout, color, "\n");
+
+ printf(" ");
+ print_sched_map(sched, this_cpu, cpus_nr, color, true);
+ timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
+ color_fprintf(stdout, color, " %12s secs ", stimestamp);
+ }
+
color_fprintf(stdout, color, "\n");
+out:
+ if (sched->map.task_name)
+ thread__put(sched_out);
+
thread__put(sched_in);
return 0;
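
In short, when --task-name is given the switch event is handled as follows: if
neither the incoming (sched_in) nor the outgoing (sched_out) task matches,
nothing is printed; if only the outgoing task matches, execution jumps straight
to the sched_out label and only the sched-out row is printed; if the incoming
task matches, the usual sched-in row is printed and, when the outgoing task
matches as well (proceed == 1), a separate sched-out row follows. Without
--task-name the behaviour is unchanged and a single row is printed per switch.
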
@@ -2659,7 +2746,10 @@ out:
tr->last_state = state;
/* sched out event for task so reset ready to run time */
- tr->ready_to_run = 0;
+ if (state == 'R')
+ tr->ready_to_run = t;
+ else
+ tr->ready_to_run = 0;
}
evsel__save_time(evsel, sample->time, sample->cpu);
@@ -3307,6 +3397,10 @@ static int perf_sched__map(struct perf_sched *sched)
if (!sched->curr_thread)
return rc;
+ sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
+ if (!sched->curr_out_thread)
+ return rc;
+
if (setup_cpus_switch_event(sched))
goto out_free_curr_thread;
@@ -3383,6 +3477,9 @@ static int perf_sched__replay(struct perf_sched *sched)
sched->thread_funcs_exit = false;
create_tasks(sched);
printf("------------------------------------------------------------\n");
+ if (sched->replay_repeat == 0)
+ sched->replay_repeat = UINT_MAX;
+
for (i = 0; i < sched->replay_repeat; i++)
run_one_test(sched);
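
The check just above maps a repeat count of 0 to UINT_MAX, so "perf sched
replay -r 0" now keeps replaying until interrupted; the option help below is
updated to match (the old text advertised -1 as infinite, which is awkward for
an unsigned option).
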
@@ -3548,7 +3645,7 @@ int cmd_sched(int argc, const char **argv)
};
const struct option replay_options[] = {
OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
- "repeat the workload replay N times (-1: infinite)"),
+ "repeat the workload replay N times (0: infinite)"),
OPT_PARENT(sched_options)
};
const struct option map_options[] = {
@@ -3560,6 +3657,10 @@ int cmd_sched(int argc, const char **argv)
"highlight given CPUs in map"),
OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
"display given CPUs in map"),
+ OPT_STRING(0, "task-name", &sched.map.task_name, "task",
+ "map output only for the given task name(s)."),
+ OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
+ "given command name can be partially matched (fuzzy matching)"),
OPT_PARENT(sched_options)
};
const struct option timehist_options[] = {
@@ -3658,6 +3759,14 @@ int cmd_sched(int argc, const char **argv)
argc = parse_options(argc, argv, map_options, map_usage, 0);
if (argc)
usage_with_options(map_usage, map_options);
+
+ if (sched.map.task_name) {
+ sched.map.task_names = strlist__new(sched.map.task_name, NULL);
+ if (sched.map.task_names == NULL) {
+ fprintf(stderr, "Failed to parse task names\n");
+ return -1;
+ }
+ }
}
sched.tp_handler = &map_ops;
setup_sorting(&sched, latency_options, latency_usage);
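
Illustrative usage of the new map options: "perf sched map --task-name
gcc,make --fuzzy-name" only emits rows for switch events involving a task
whose comm contains "gcc" or "make"; non-matching tasks show up as '-' in the
per-CPU columns. The list passed to --task-name is parsed with strlist__new(),
so multiple names are comma-separated.
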
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 35f79b48e8dc..661832756a24 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -164,6 +164,35 @@ static struct perf_stat_config stat_config = {
.iostat_run = false,
};
+/* Options set from the command line. */
+struct opt_aggr_mode {
+ bool node, socket, die, cluster, cache, core, thread, no_aggr;
+};
+
+/* Turn command line option into most generic aggregation mode setting. */
+static enum aggr_mode opt_aggr_mode_to_aggr_mode(struct opt_aggr_mode *opt_mode)
+{
+ enum aggr_mode mode = AGGR_GLOBAL;
+
+ if (opt_mode->node)
+ mode = AGGR_NODE;
+ if (opt_mode->socket)
+ mode = AGGR_SOCKET;
+ if (opt_mode->die)
+ mode = AGGR_DIE;
+ if (opt_mode->cluster)
+ mode = AGGR_CLUSTER;
+ if (opt_mode->cache)
+ mode = AGGR_CACHE;
+ if (opt_mode->core)
+ mode = AGGR_CORE;
+ if (opt_mode->thread)
+ mode = AGGR_THREAD;
+ if (opt_mode->no_aggr)
+ mode = AGGR_NONE;
+ return mode;
+}
+
static void evlist__check_cpu_maps(struct evlist *evlist)
{
struct evsel *evsel, *warned_leader = NULL;
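
Note on precedence: opt_aggr_mode_to_aggr_mode() folds the individual booleans
into a single enum in a fixed order, so when several aggregation switches are
combined the one tested last wins regardless of command-line order. For
example "perf stat --per-socket --per-core ..." resolves to AGGR_CORE, and
-A/--no-aggr overrides all of the per-* options.
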
@@ -255,45 +284,38 @@ static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 t
process_synthesized_event, NULL);
}
-static int read_single_counter(struct evsel *counter, int cpu_map_idx,
- int thread, struct timespec *rs)
+static int read_single_counter(struct evsel *counter, int cpu_map_idx, int thread)
{
- switch(counter->tool_event) {
- case PERF_TOOL_DURATION_TIME: {
- u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
- struct perf_counts_values *count =
- perf_counts(counter->counts, cpu_map_idx, thread);
- count->ena = count->run = val;
- count->val = val;
- return 0;
- }
- case PERF_TOOL_USER_TIME:
- case PERF_TOOL_SYSTEM_TIME: {
- u64 val;
- struct perf_counts_values *count =
- perf_counts(counter->counts, cpu_map_idx, thread);
- if (counter->tool_event == PERF_TOOL_USER_TIME)
- val = ru_stats.ru_utime_usec_stat.mean;
- else
- val = ru_stats.ru_stime_usec_stat.mean;
- count->ena = count->run = val;
- count->val = val;
- return 0;
- }
- default:
- case PERF_TOOL_NONE:
- return evsel__read_counter(counter, cpu_map_idx, thread);
- case PERF_TOOL_MAX:
- /* This should never be reached */
- return 0;
+ int err = evsel__read_counter(counter, cpu_map_idx, thread);
+
+ /*
+ * Reading user and system time will fail when the process
+ * terminates. Use the wait4 values in that case.
+ */
+ if (err && cpu_map_idx == 0 &&
+ (counter->tool_event == PERF_TOOL_USER_TIME ||
+ counter->tool_event == PERF_TOOL_SYSTEM_TIME)) {
+ u64 val, *start_time;
+ struct perf_counts_values *count =
+ perf_counts(counter->counts, cpu_map_idx, thread);
+
+ start_time = xyarray__entry(counter->start_times, cpu_map_idx, thread);
+ if (counter->tool_event == PERF_TOOL_USER_TIME)
+ val = ru_stats.ru_utime_usec_stat.mean;
+ else
+ val = ru_stats.ru_stime_usec_stat.mean;
+ count->ena = count->run = *start_time + val;
+ count->val = val;
+ return 0;
}
+ return err;
}
/*
* Read out the results of a single counter:
* do not aggregate counts across CPUs in system-wide mode
*/
-static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
+static int read_counter_cpu(struct evsel *counter, int cpu_map_idx)
{
int nthreads = perf_thread_map__nr(evsel_list->core.threads);
int thread;
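
With this change the tool events (duration_time, user_time, system_time) are
read through evsel__read_counter() like any other counter; the rusage-derived
values are only a fallback for user/system time when that read fails because
the workload has already exited, as the comment above notes. As a result the
interval timespec no longer needs to be threaded through read_counters(),
read_affinity_counters() and read_counter_cpu().
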
@@ -311,7 +333,7 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_
* (via evsel__read_counter()) and sets their count->loaded.
*/
if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
- read_single_counter(counter, cpu_map_idx, thread, rs)) {
+ read_single_counter(counter, cpu_map_idx, thread)) {
counter->counts->scaled = -1;
perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
@@ -340,7 +362,7 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_
return 0;
}
-static int read_affinity_counters(struct timespec *rs)
+static int read_affinity_counters(void)
{
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity saved_affinity, *affinity;
@@ -361,10 +383,8 @@ static int read_affinity_counters(struct timespec *rs)
if (evsel__is_bpf(counter))
continue;
- if (!counter->err) {
- counter->err = read_counter_cpu(counter, rs,
- evlist_cpu_itr.cpu_map_idx);
- }
+ if (!counter->err)
+ counter->err = read_counter_cpu(counter, evlist_cpu_itr.cpu_map_idx);
}
if (affinity)
affinity__cleanup(&saved_affinity);
@@ -388,11 +408,11 @@ static int read_bpf_map_counters(void)
return 0;
}
-static int read_counters(struct timespec *rs)
+static int read_counters(void)
{
if (!stat_config.stop_read_counter) {
if (read_bpf_map_counters() ||
- read_affinity_counters(rs))
+ read_affinity_counters())
return -1;
}
return 0;
@@ -423,7 +443,7 @@ static void process_interval(void)
evlist__reset_aggr_stats(evsel_list);
- if (read_counters(&rs) == 0)
+ if (read_counters() == 0)
process_counters();
if (STAT_RECORD) {
@@ -911,7 +931,7 @@ try_again_reset:
* avoid arbitrary skew, we must read all counters before closing any
* group leaders.
*/
- if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
+ if (read_counters() == 0)
process_counters();
/*
@@ -1096,7 +1116,7 @@ static int parse_cache_level(const struct option *opt,
int unset __maybe_unused)
{
int level;
- u32 *aggr_mode = (u32 *)opt->value;
+ struct opt_aggr_mode *opt_aggr_mode = (struct opt_aggr_mode *)opt->value;
u32 *aggr_level = (u32 *)opt->data;
/*
@@ -1135,155 +1155,11 @@ static int parse_cache_level(const struct option *opt,
return -EINVAL;
}
out:
- *aggr_mode = AGGR_CACHE;
+ opt_aggr_mode->cache = true;
*aggr_level = level;
return 0;
}
-static struct option stat_options[] = {
- OPT_BOOLEAN('T', "transaction", &transaction_run,
- "hardware transaction statistics"),
- OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
- "event selector. use 'perf list' to list available events",
- parse_events_option),
- OPT_CALLBACK(0, "filter", &evsel_list, "filter",
- "event filter", parse_filter),
- OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
- "child tasks do not inherit counters"),
- OPT_STRING('p', "pid", &target.pid, "pid",
- "stat events on existing process id"),
- OPT_STRING('t', "tid", &target.tid, "tid",
- "stat events on existing thread id"),
-#ifdef HAVE_BPF_SKEL
- OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
- "stat events on existing bpf program id"),
- OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
- "use bpf program to count events"),
- OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
- "path to perf_event_attr map"),
-#endif
- OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
- "system-wide collection from all CPUs"),
- OPT_BOOLEAN(0, "scale", &stat_config.scale,
- "Use --no-scale to disable counter scaling for multiplexing"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show counter open errors, etc)"),
- OPT_INTEGER('r', "repeat", &stat_config.run_count,
- "repeat command and print average + stddev (max: 100, forever: 0)"),
- OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
- "display details about each run (only with -r option)"),
- OPT_BOOLEAN('n', "null", &stat_config.null_run,
- "null run - dont start any counters"),
- OPT_INCR('d', "detailed", &detailed_run,
- "detailed run - start a lot of events"),
- OPT_BOOLEAN('S', "sync", &sync_run,
- "call sync() before starting a run"),
- OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
- "print large numbers with thousands\' separators",
- stat__set_big_num),
- OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
- "list of cpus to monitor in system-wide"),
- OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
- "disable aggregation across CPUs or PMUs", AGGR_NONE),
- OPT_SET_UINT(0, "no-merge", &stat_config.aggr_mode,
- "disable aggregation the same as -A or -no-aggr", AGGR_NONE),
- OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
- "Merge identical named hybrid events"),
- OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
- "print counts with custom separator"),
- OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
- "print counts in JSON format"),
- OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
- "monitor event in cgroup name only", parse_stat_cgroups),
- OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
- "expand events for each cgroup"),
- OPT_STRING('o', "output", &output_name, "file", "output file name"),
- OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
- OPT_INTEGER(0, "log-fd", &output_fd,
- "log output to fd, instead of stderr"),
- OPT_STRING(0, "pre", &pre_cmd, "command",
- "command to run prior to the measured command"),
- OPT_STRING(0, "post", &post_cmd, "command",
- "command to run after to the measured command"),
- OPT_UINTEGER('I', "interval-print", &stat_config.interval,
- "print counts at regular interval in ms "
- "(overhead is possible for values <= 100ms)"),
- OPT_INTEGER(0, "interval-count", &stat_config.times,
- "print counts for fixed number of times"),
- OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
- "clear screen in between new interval"),
- OPT_UINTEGER(0, "timeout", &stat_config.timeout,
- "stop workload and print counts after a timeout period in ms (>= 10ms)"),
- OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
- "aggregate counts per processor socket", AGGR_SOCKET),
- OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
- "aggregate counts per processor die", AGGR_DIE),
- OPT_SET_UINT(0, "per-cluster", &stat_config.aggr_mode,
- "aggregate counts per processor cluster", AGGR_CLUSTER),
- OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level,
- "cache level", "aggregate count at this cache level (Default: LLC)",
- parse_cache_level),
- OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
- "aggregate counts per physical processor core", AGGR_CORE),
- OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
- "aggregate counts per thread", AGGR_THREAD),
- OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
- "aggregate counts per numa node", AGGR_NODE),
- OPT_INTEGER('D', "delay", &target.initial_delay,
- "ms to wait before starting measurement after program start (-1: start with events disabled)"),
- OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
- "Only print computed metrics. No raw values", enable_metric_only),
- OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
- "don't group metric events, impacts multiplexing"),
- OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
- "don't try to share events between metrics in a group"),
- OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
- "disable adding events for the metric threshold calculation"),
- OPT_BOOLEAN(0, "topdown", &topdown_run,
- "measure top-down statistics"),
- OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
- "Set the metrics level for the top-down statistics (0: max level)"),
- OPT_BOOLEAN(0, "smi-cost", &smi_cost,
- "measure SMI cost"),
- OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
- "monitor specified metrics or metric groups (separated by ,)",
- append_metric_groups),
- OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
- "Configure all used events to run in kernel space.",
- PARSE_OPT_EXCLUSIVE),
- OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
- "Configure all used events to run in user space.",
- PARSE_OPT_EXCLUSIVE),
- OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
- "Use with 'percore' event qualifier to show the event "
- "counts of one hardware thread by sum up total hardware "
- "threads of same physical core"),
- OPT_BOOLEAN(0, "summary", &stat_config.summary,
- "print summary for interval mode"),
- OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
- "don't print 'summary' for CSV summary output"),
- OPT_BOOLEAN(0, "quiet", &quiet,
- "don't print any output, messages or warnings (useful with record)"),
- OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
- "Only enable events on applying cpu with this type "
- "for hybrid platform (e.g. core or atom)",
- parse_cputype),
-#ifdef HAVE_LIBPFM
- OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
- "libpfm4 event selector. use 'perf list' to list available events",
- parse_libpfm_events_option),
-#endif
- OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
- "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
- "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
- "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
- parse_control_option),
- OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
- "measure I/O performance metrics provided by arch/platform",
- iostat_parse),
- OPT_END()
-};
-
/**
* Calculate the cache instance ID from the map in
* /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
@@ -2245,13 +2121,15 @@ static void init_features(struct perf_session *session)
perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}
-static int __cmd_record(int argc, const char **argv)
+static int __cmd_record(const struct option stat_options[], struct opt_aggr_mode *opt_mode,
+ int argc, const char **argv)
{
struct perf_session *session;
struct perf_data *data = &perf_stat.data;
argc = parse_options(argc, argv, stat_options, stat_record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ stat_config.aggr_mode = opt_aggr_mode_to_aggr_mode(opt_mode);
if (output_name)
data->path = output_name;
@@ -2494,6 +2372,147 @@ static void setup_system_wide(int forks)
int cmd_stat(int argc, const char **argv)
{
+ struct opt_aggr_mode opt_mode = {};
+ struct option stat_options[] = {
+ OPT_BOOLEAN('T', "transaction", &transaction_run,
+ "hardware transaction statistics"),
+ OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
+ "event selector. use 'perf list' to list available events",
+ parse_events_option),
+ OPT_CALLBACK(0, "filter", &evsel_list, "filter",
+ "event filter", parse_filter),
+ OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
+ "child tasks do not inherit counters"),
+ OPT_STRING('p', "pid", &target.pid, "pid",
+ "stat events on existing process id"),
+ OPT_STRING('t', "tid", &target.tid, "tid",
+ "stat events on existing thread id"),
+#ifdef HAVE_BPF_SKEL
+ OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
+ "stat events on existing bpf program id"),
+ OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
+ "use bpf program to count events"),
+ OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
+ "path to perf_event_attr map"),
+#endif
+ OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_BOOLEAN(0, "scale", &stat_config.scale,
+ "Use --no-scale to disable counter scaling for multiplexing"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
+ OPT_INTEGER('r', "repeat", &stat_config.run_count,
+ "repeat command and print average + stddev (max: 100, forever: 0)"),
+ OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
+ "display details about each run (only with -r option)"),
+ OPT_BOOLEAN('n', "null", &stat_config.null_run,
+ "null run - dont start any counters"),
+ OPT_INCR('d', "detailed", &detailed_run,
+ "detailed run - start a lot of events"),
+ OPT_BOOLEAN('S', "sync", &sync_run,
+ "call sync() before starting a run"),
+ OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
+ "print large numbers with thousands\' separators",
+ stat__set_big_num),
+ OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
+ "list of cpus to monitor in system-wide"),
+ OPT_BOOLEAN('A', "no-aggr", &opt_mode.no_aggr,
+ "disable aggregation across CPUs or PMUs"),
+ OPT_BOOLEAN(0, "no-merge", &opt_mode.no_aggr,
+ "disable aggregation the same as -A or -no-aggr"),
+ OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
+ "Merge identical named hybrid events"),
+ OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
+ "print counts with custom separator"),
+ OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
+ "print counts in JSON format"),
+ OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
+ "monitor event in cgroup name only", parse_stat_cgroups),
+ OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
+ "expand events for each cgroup"),
+ OPT_STRING('o', "output", &output_name, "file", "output file name"),
+ OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
+ OPT_INTEGER(0, "log-fd", &output_fd,
+ "log output to fd, instead of stderr"),
+ OPT_STRING(0, "pre", &pre_cmd, "command",
+ "command to run prior to the measured command"),
+ OPT_STRING(0, "post", &post_cmd, "command",
+ "command to run after to the measured command"),
+ OPT_UINTEGER('I', "interval-print", &stat_config.interval,
+ "print counts at regular interval in ms "
+ "(overhead is possible for values <= 100ms)"),
+ OPT_INTEGER(0, "interval-count", &stat_config.times,
+ "print counts for fixed number of times"),
+ OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
+ "clear screen in between new interval"),
+ OPT_UINTEGER(0, "timeout", &stat_config.timeout,
+ "stop workload and print counts after a timeout period in ms (>= 10ms)"),
+ OPT_BOOLEAN(0, "per-socket", &opt_mode.socket,
+ "aggregate counts per processor socket"),
+ OPT_BOOLEAN(0, "per-die", &opt_mode.die, "aggregate counts per processor die"),
+ OPT_BOOLEAN(0, "per-cluster", &opt_mode.cluster,
+ "aggregate counts per processor cluster"),
+ OPT_CALLBACK_OPTARG(0, "per-cache", &opt_mode, &stat_config.aggr_level,
+ "cache level", "aggregate count at this cache level (Default: LLC)",
+ parse_cache_level),
+ OPT_BOOLEAN(0, "per-core", &opt_mode.core,
+ "aggregate counts per physical processor core"),
+ OPT_BOOLEAN(0, "per-thread", &opt_mode.thread, "aggregate counts per thread"),
+ OPT_BOOLEAN(0, "per-node", &opt_mode.node, "aggregate counts per numa node"),
+ OPT_INTEGER('D', "delay", &target.initial_delay,
+ "ms to wait before starting measurement after program start (-1: start with events disabled)"),
+ OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
+ "Only print computed metrics. No raw values", enable_metric_only),
+ OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
+ "don't group metric events, impacts multiplexing"),
+ OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
+ "don't try to share events between metrics in a group"),
+ OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
+ "disable adding events for the metric threshold calculation"),
+ OPT_BOOLEAN(0, "topdown", &topdown_run,
+ "measure top-down statistics"),
+ OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
+ "Set the metrics level for the top-down statistics (0: max level)"),
+ OPT_BOOLEAN(0, "smi-cost", &smi_cost,
+ "measure SMI cost"),
+ OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
+ "monitor specified metrics or metric groups (separated by ,)",
+ append_metric_groups),
+ OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
+ "Configure all used events to run in kernel space.",
+ PARSE_OPT_EXCLUSIVE),
+ OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
+ "Configure all used events to run in user space.",
+ PARSE_OPT_EXCLUSIVE),
+ OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
+ "Use with 'percore' event qualifier to show the event "
+ "counts of one hardware thread by sum up total hardware "
+ "threads of same physical core"),
+ OPT_BOOLEAN(0, "summary", &stat_config.summary,
+ "print summary for interval mode"),
+ OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
+ "don't print 'summary' for CSV summary output"),
+ OPT_BOOLEAN(0, "quiet", &quiet,
+ "don't print any output, messages or warnings (useful with record)"),
+ OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
+ "Only enable events on applying cpu with this type "
+ "for hybrid platform (e.g. core or atom)",
+ parse_cputype),
+#ifdef HAVE_LIBPFM
+ OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
+ "libpfm4 event selector. use 'perf list' to list available events",
+ parse_libpfm_events_option),
+#endif
+ OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
+ "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
+ "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
+ "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
+ parse_control_option),
+ OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
+ "measure I/O performance metrics provided by arch/platform",
+ iostat_parse),
+ OPT_END()
+ };
const char * const stat_usage[] = {
"perf stat [<options>] [<command>]",
NULL
@@ -2522,6 +2541,8 @@ int cmd_stat(int argc, const char **argv)
(const char **) stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ stat_config.aggr_mode = opt_aggr_mode_to_aggr_mode(&opt_mode);
+
if (stat_config.csv_sep) {
stat_config.csv_output = true;
if (!strcmp(stat_config.csv_sep, "\\t"))
@@ -2530,7 +2551,7 @@ int cmd_stat(int argc, const char **argv)
stat_config.csv_sep = DEFAULT_SEPARATOR;
if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
- argc = __cmd_record(argc, argv);
+ argc = __cmd_record(stat_options, &opt_mode, argc, argv);
if (argc < 0)
return -1;
} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1d6aef51c122..e8cbbf10d361 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1055,6 +1055,13 @@ try_again:
}
}
+ if (evlist__apply_filters(evlist, &counter)) {
+ pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
+ counter->filter ?: "BPF", evsel__name(counter), errno,
+ str_error_r(errno, msg, sizeof(msg)));
+ goto out_err;
+ }
+
if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
errno, str_error_r(errno, msg, sizeof(msg)));
@@ -1462,6 +1469,8 @@ int cmd_top(int argc, const char **argv)
OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
+ OPT_CALLBACK(0, "filter", &top.evlist, "filter",
+ "event filter", parse_filter),
OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
OPT_STRING('p', "pid", &target->pid, "pid",
"profile events on existing process id"),
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 08a3a6effac1..8449f2beb54d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1033,7 +1033,7 @@ static const struct syscall_fmt syscall_fmts[] = {
#if defined(__s390x__)
.alias = "old_mmap",
#endif
- .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
+ .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
[3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
.strtoul = STUL_STRARRAY_FLAGS,
.parm = &strarray__mmap_flags, },
@@ -1050,7 +1050,7 @@ static const struct syscall_fmt syscall_fmts[] = {
[4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
{ .name = "mprotect",
.arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
- [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
+ [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, },
{ .name = "mq_unlink",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
{ .name = "mremap", .hexret = true,
@@ -1084,7 +1084,7 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
{ .name = "pkey_mprotect",
.arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
- [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
+ [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
[3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
{ .name = "poll", .timeout = true, },
{ .name = "ppoll", .timeout = true, },
@@ -2091,17 +2091,11 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
/*
- * Suppress this argument if its value is zero and
- * and we don't have a string associated in an
- * strarray for it.
- */
- if (val == 0 &&
- !trace->show_zeros &&
- !(sc->arg_fmt &&
- (sc->arg_fmt[arg.idx].show_zero ||
- sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
- sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
- sc->arg_fmt[arg.idx].parm))
+ * Suppress this argument if its value is zero and show_zero
+ * property isn't set.
+ */
+ if (val == 0 && !trace->show_zeros &&
+ !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero))
continue;
printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
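
This simplification relies on .show_zero now being set explicitly wherever a
zero value is meaningful: the SCA_MMAP_PROT users above (mmap, mprotect,
pkey_mprotect) set .show_zero = true so a prot of 0 (PROT_NONE) is still
printed, and strarray-backed arguments no longer get an implicit exemption
from zero suppression here - any that still need it must set .show_zero.
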
@@ -2796,17 +2790,8 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
*/
val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
- /*
- * Suppress this argument if its value is zero and
- * we don't have a string associated in an
- * strarray for it.
- */
- if (val == 0 &&
- !trace->show_zeros &&
- !((arg->show_zero ||
- arg->scnprintf == SCA_STRARRAY ||
- arg->scnprintf == SCA_STRARRAYS) &&
- arg->parm))
+ /* Suppress this argument if its value is zero and show_zero property isn't set. */
+ if (val == 0 && !trace->show_zeros && !arg->show_zero)
continue;
printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
@@ -3369,8 +3354,6 @@ static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
struct tep_format_field *field, *candidate_field;
- int id;
-
/*
* We're only interested in syscalls that have a pointer:
*/
@@ -3382,7 +3365,8 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace
return NULL;
try_to_find_pair:
- for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
+ for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
+ int id = syscalltbl__id_at_idx(trace->sctbl, i);
struct syscall *pair = trace__syscall_info(trace, NULL, id);
struct bpf_program *pair_prog;
bool is_candidate = false;
@@ -3471,10 +3455,10 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
- int err = 0, key;
+ int err = 0;
- for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
- int prog_fd;
+ for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
+ int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i);
if (!trace__syscall_enabled(trace, key))
continue;
@@ -3520,7 +3504,8 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
* first and second arg (this one on the raw_syscalls:sys_exit prog
* array tail call, then that one will be used.
*/
- for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
+ for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
+ int key = syscalltbl__id_at_idx(trace->sctbl, i);
struct syscall *sc = trace__syscall_info(trace, NULL, key);
struct bpf_program *pair_prog;
int prog_fd;
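
Both loops now walk syscall-table indexes and translate each index to a
syscall id via syscalltbl__id_at_idx() instead of assuming the ids form a
dense 0..nr_entries-1 range, which keeps the BPF prog-array setup correct on
architectures whose syscall numbers are sparse or not laid out contiguously.
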
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx93/sys/ddrc.json b/tools/perf/pmu-events/arch/arm64/freescale/imx93/sys/ddrc.json
new file mode 100644
index 000000000000..eeeae4d49fce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx93/sys/ddrc.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "ddr cycles event",
+ "EventCode": "0x00",
+ "EventName": "imx93_ddr.cycles",
+ "Unit": "imx9_ddr",
+ "Compat": "imx93"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx93/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/freescale/imx93/sys/metrics.json
new file mode 100644
index 000000000000..4d2454ca1259
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx93/sys/metrics.json
@@ -0,0 +1,26 @@
+[
+ {
+ "BriefDescription": "bandwidth usage for lpddr4x evk board",
+ "MetricName": "imx93_bandwidth_usage.lpddr4x",
+ "MetricExpr": "(((( imx9_ddr0@ddrc_pm_0@ ) * 2 * 8 ) + (( imx9_ddr0@ddrc_pm_3@ + imx9_ddr0@ddrc_pm_5@ + imx9_ddr0@ddrc_pm_7@ + imx9_ddr0@ddrc_pm_9@ - imx9_ddr0@ddrc_pm_2@ - imx9_ddr0@ddrc_pm_4@ - imx9_ddr0@ddrc_pm_6@ - imx9_ddr0@ddrc_pm_8@ ) * 32 )) / duration_time) / (3733 * 1000000 * 2)",
+ "ScaleUnit": "1e2%",
+ "Unit": "imx9_ddr",
+ "Compat": "imx93"
+ },
+ {
+ "BriefDescription": "bytes all masters read from ddr",
+ "MetricName": "imx93_ddr_read.all",
+ "MetricExpr": "( imx9_ddr0@ddrc_pm_0@ ) * 2 * 8",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx93"
+ },
+ {
+ "BriefDescription": "bytes all masters write to ddr",
+ "MetricName": "imx93_ddr_write.all",
+ "MetricExpr": "( imx9_ddr0@ddrc_pm_3@ + imx9_ddr0@ddrc_pm_5@ + imx9_ddr0@ddrc_pm_7@ + imx9_ddr0@ddrc_pm_9@ - imx9_ddr0@ddrc_pm_2@ - imx9_ddr0@ddrc_pm_4@ - imx9_ddr0@ddrc_pm_6@ - imx9_ddr0@ddrc_pm_8@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx93"
+ }
+]
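
Reading the imx93_bandwidth_usage.lpddr4x expression above: the read counter
is scaled by 2 * 8 bytes and the write-counter combination by 32 bytes, the
sum is divided by duration_time to get bytes per second, and the result is
normalized against a fixed theoretical peak; the "3733 * 1000000 * 2" term is
presumably the LPDDR4X data rate in MT/s times a 2-byte bus width.
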
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx95/sys/ddrc.json b/tools/perf/pmu-events/arch/arm64/freescale/imx95/sys/ddrc.json
new file mode 100644
index 000000000000..4dc9d2968bdc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx95/sys/ddrc.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "ddr cycles event",
+ "EventCode": "0x00",
+ "EventName": "imx95_ddr.cycles",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx95/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/freescale/imx95/sys/metrics.json
new file mode 100644
index 000000000000..126ce980f6f2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx95/sys/metrics.json
@@ -0,0 +1,874 @@
+[
+ {
+ "BriefDescription": "bandwidth usage for lpddr5 evk board",
+ "MetricName": "imx95_bandwidth_usage.lpddr5",
+ "MetricExpr": "(( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x000\\,axi_id\\=0x000@ + imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x000\\,axi_id\\=0x000@ ) * 32 / duration_time) / (6400 * 1000000 * 4)",
+ "ScaleUnit": "1e2%",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all masters read from ddr",
+ "MetricName": "imx95_ddr_read.all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x000\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all masters write to ddr",
+ "MetricName": "imx95_ddr_write.all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x000\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all a55 read from ddr",
+ "MetricName": "imx95_ddr_read.a55_all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3fc\\,axi_id\\=0x000@ + imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3fe\\,axi_id\\=0x004@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all a55 write to ddr (part1)",
+ "MetricName": "imx95_ddr_write.a55_all_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3fc\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all a55 write to ddr (part2)",
+ "MetricName": "imx95_ddr_write.a55_all_2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3fe\\,axi_id\\=0x004@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 0 read from ddr",
+ "MetricName": "imx95_ddr_read.a55_0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3ff\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 0 write to ddr",
+ "MetricName": "imx95_ddr_write.a55_0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3ff\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 1 read from ddr",
+ "MetricName": "imx95_ddr_read.a55_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x001@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 1 write to ddr",
+ "MetricName": "imx95_ddr_write.a55_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x001@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 2 read from ddr",
+ "MetricName": "imx95_ddr_read.a55_2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x002@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 2 write to ddr",
+ "MetricName": "imx95_ddr_write.a55_2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x002@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 3 read from ddr",
+ "MetricName": "imx95_ddr_read.a55_3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x003@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 3 write to ddr",
+ "MetricName": "imx95_ddr_write.a55_3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x003@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 4 read from ddr",
+ "MetricName": "imx95_ddr_read.a55_4",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x004@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 4 write to ddr",
+ "MetricName": "imx95_ddr_write.a55_4",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x004@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 5 read from ddr",
+ "MetricName": "imx95_ddr_read.a55_5",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x005@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 5 write to ddr",
+ "MetricName": "imx95_ddr_write.a55_5",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x005@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of Cortex-A DSU L3 evicted/ACP transactions read from ddr",
+ "MetricName": "imx95_ddr_read.cortexa_dsu_l3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x007@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of Cortex-A DSU L3 evicted/ACP transactions write to ddr",
+ "MetricName": "imx95_ddr_write.cortexa_dsu_l3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x007@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of m33 read from ddr",
+ "MetricName": "imx95_ddr_read.m33",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x008@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of m33 write to ddr",
+ "MetricName": "imx95_ddr_write.m33",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x008@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of m7 read from ddr",
+ "MetricName": "imx95_ddr_read.m7",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x009@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of m7 write to ddr",
+ "MetricName": "imx95_ddr_write.m7",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x009@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of sentinel read from ddr",
+ "MetricName": "imx95_ddr_read.sentinel",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x00a@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of sentinel write to ddr",
+ "MetricName": "imx95_ddr_write.sentinel",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00a@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of edma1 read from ddr",
+ "MetricName": "imx95_ddr_read.edma1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x00b@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of edma1 write to ddr",
+ "MetricName": "imx95_ddr_write.edma1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00b@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of edma2 read from ddr",
+ "MetricName": "imx95_ddr_read.edma2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x00c@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of edma2 write to ddr",
+ "MetricName": "imx95_ddr_write.edma2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00c@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of netc read from ddr",
+ "MetricName": "imx95_ddr_read.netc",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x00d@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of netc write to ddr",
+ "MetricName": "imx95_ddr_write.netc",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00d@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of npu read from ddr",
+ "MetricName": "imx95_ddr_read.npu",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x010@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of npu write to ddr",
+ "MetricName": "imx95_ddr_write.npu",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x010@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of gpu read from ddr",
+ "MetricName": "imx95_ddr_read.gpu",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x020@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of gpu write to ddr",
+ "MetricName": "imx95_ddr_write.gpu",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x020@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usdhc1 read from ddr",
+ "MetricName": "imx95_ddr_read.usdhc1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x0b0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usdhc1 write to ddr",
+ "MetricName": "imx95_ddr_write.usdhc1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x0b0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usdhc2 read from ddr",
+ "MetricName": "imx95_ddr_read.usdhc2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x0c0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usdhc2 write to ddr",
+ "MetricName": "imx95_ddr_write.usdhc2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x0c0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usdhc3 read from ddr",
+ "MetricName": "imx95_ddr_read.usdhc3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x0d0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usdhc3 write to ddr",
+ "MetricName": "imx95_ddr_write.usdhc3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x0d0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of xspi read from ddr",
+ "MetricName": "imx95_ddr_read.xspi",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x0f0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of xspi write to ddr",
+ "MetricName": "imx95_ddr_write.xspi",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x0f0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of pcie1 read from ddr",
+ "MetricName": "imx95_ddr_read.pcie1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x100@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of pcie1 write to ddr",
+ "MetricName": "imx95_ddr_write.pcie1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x100@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of pcie2 read from ddr",
+ "MetricName": "imx95_ddr_read.pcie2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x006@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of pcie2 write to ddr",
+ "MetricName": "imx95_ddr_write.pcie2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x006@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of pcie3 read from ddr",
+ "MetricName": "imx95_ddr_read.pcie3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x120@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of pcie3 write to ddr",
+ "MetricName": "imx95_ddr_write.pcie3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x120@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of pcie4 read from ddr",
+ "MetricName": "imx95_ddr_read.pcie4",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x130@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of pcie4 write to ddr",
+ "MetricName": "imx95_ddr_write.pcie4",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x130@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usb1 read from ddr",
+ "MetricName": "imx95_ddr_read.usb1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x140@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usb1 write to ddr",
+ "MetricName": "imx95_ddr_write.usb1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x140@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usb2 read from ddr",
+ "MetricName": "imx95_ddr_read.usb2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x150@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of usb2 write to ddr",
+ "MetricName": "imx95_ddr_write.usb2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x150@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of vpu codec primary bus read from ddr",
+ "MetricName": "imx95_ddr_read.vpu_primy",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x180@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of vpu codec primary bus write to ddr",
+ "MetricName": "imx95_ddr_write.vpu_primy",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x180@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of vpu codec secondary bus read from ddr",
+ "MetricName": "imx95_ddr_read.vpu_secndy",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x190@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of vpu codec secondary bus write to ddr",
+ "MetricName": "imx95_ddr_write.vpu_secndy",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x190@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of jpeg decoder read from ddr",
+ "MetricName": "imx95_ddr_read.jpeg_dec",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x1a0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of jpeg decoder write to ddr",
+ "MetricName": "imx95_ddr_write.jpeg_dec",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x1a0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of jpeg encoder read from ddr",
+ "MetricName": "imx95_ddr_read.jpeg_dec",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x1b0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of jpeg encoder write to ddr",
+ "MetricName": "imx95_ddr_write.jpeg_enc",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x1b0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all vpu submodules read from ddr",
+ "MetricName": "imx95_ddr_read.vpu_all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x380\\,axi_id\\=0x180@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all vpu submodules write to ddr",
+ "MetricName": "imx95_ddr_write.vpu_all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x380\\,axi_id\\=0x180@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of cortex m0+ read from ddr",
+ "MetricName": "imx95_ddr_read.m0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x200@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of cortex m0+ write to ddr",
+ "MetricName": "imx95_ddr_write.m0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x200@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of camera edma read from ddr",
+ "MetricName": "imx95_ddr_read.camera_edma",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x210@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of camera edma write to ddr",
+ "MetricName": "imx95_ddr_write.camera_edma",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x210@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isi rd read from ddr",
+ "MetricName": "imx95_ddr_read.isi_rd",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x220@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isi rd write to ddr",
+ "MetricName": "imx95_ddr_write.isi_rd",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x220@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isi wr y read from ddr",
+ "MetricName": "imx95_ddr_read.isi_wr_y",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x230@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isi wr y write to ddr",
+ "MetricName": "imx95_ddr_write.isi_wr_y",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x230@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isi wr u read from ddr",
+ "MetricName": "imx95_ddr_read.isi_wr_u",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x240@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isi wr u write to ddr",
+ "MetricName": "imx95_ddr_write.isi_wr_u",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x240@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isi wr v read from ddr",
+ "MetricName": "imx95_ddr_read.isi_wr_v",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x250@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isi wr v write to ddr",
+ "MetricName": "imx95_ddr_write.isi_wr_v",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x250@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isp input dma1 read from ddr",
+ "MetricName": "imx95_ddr_read.isp_in_dma1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x260@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isp input dma1 write to ddr",
+ "MetricName": "imx95_ddr_write.isp_in_dma1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x260@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isp input dma2 read from ddr",
+ "MetricName": "imx95_ddr_read.isp_in_dma2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x270@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isp input dma2 write to ddr",
+ "MetricName": "imx95_ddr_write.isp_in_dma2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x270@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isp output dma1 read from ddr",
+ "MetricName": "imx95_ddr_read.isp_out_dma1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x280@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isp output dma1 write to ddr",
+ "MetricName": "imx95_ddr_write.isp_out_dma1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x280@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isp output dma2 read from ddr",
+ "MetricName": "imx95_ddr_read.isp_out_dma2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x290@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of isp output dma2 write to ddr",
+ "MetricName": "imx95_ddr_write.isp_out_dma2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x290@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all camera submodules read from ddr",
+ "MetricName": "imx95_ddr_read.camera_all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x380\\,axi_id\\=0x200@ + imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x280@ + imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x290@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all camera submodules write to ddr (part1)",
+ "MetricName": "imx95_ddr_write.camera_all_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x380\\,axi_id\\=0x200@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all camera submodules write to ddr (part2)",
+ "MetricName": "imx95_ddr_write.camera_all_2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x280@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all camera submodules write to ddr (part3)",
+ "MetricName": "imx95_ddr_write.camera_all_3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x290@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer0 read from ddr",
+ "MetricName": "imx95_ddr_read.disp_layer0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x2f0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer0 write to ddr",
+ "MetricName": "imx95_ddr_write.disp_layer0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x2f0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer1 read from ddr",
+ "MetricName": "imx95_ddr_read.disp_layer1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x300@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer1 write to ddr",
+ "MetricName": "imx95_ddr_write.disp_layer1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x300@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer2 read from ddr",
+ "MetricName": "imx95_ddr_read.disp_layer2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x310@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer2 write to ddr",
+ "MetricName": "imx95_ddr_write.disp_layer2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x310@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer3 read from ddr",
+ "MetricName": "imx95_ddr_read.disp_layer3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x320@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer3 write to ddr",
+ "MetricName": "imx95_ddr_write.disp_layer3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x320@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer4 read from ddr",
+ "MetricName": "imx95_ddr_read.disp_layer4",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x330@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer4 write to ddr",
+ "MetricName": "imx95_ddr_write.disp_layer4",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x330@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer5 read from ddr",
+ "MetricName": "imx95_ddr_read.disp_layer5",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x340@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display layer5 write to ddr",
+ "MetricName": "imx95_ddr_write.disp_layer5",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x340@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display blitter read from ddr",
+ "MetricName": "imx95_ddr_read.disp_blit",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x350@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display blitter write to ddr",
+ "MetricName": "imx95_ddr_write.disp_blit",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x350@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display command sequencer read from ddr",
+ "MetricName": "imx95_ddr_read.disp_cmd",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x360@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of display command sequencer write to ddr",
+ "MetricName": "imx95_ddr_write.disp_cmd",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x360@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all display submodules read from ddr",
+ "MetricName": "imx95_ddr_read.disp_all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x300\\,axi_id\\=0x300@ + imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3a0\\,axi_id\\=0x2a0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all display submodules write to ddr (part1)",
+ "MetricName": "imx95_ddr_write.disp_all_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x300\\,axi_id\\=0x300@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ },
+ {
+ "BriefDescription": "bytes of all display submodules write to ddr (part2)",
+ "MetricName": "imx95_ddr_write.disp_all_2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3a0\\,axi_id\\=0x2a0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx95"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
index b72c0e2cb946..8fdf4a4225de 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -113,42 +113,30 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.",
- "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
- "MetricName": "tma_alloc_restriction",
- "MetricThreshold": "tma_alloc_restriction > 0.1",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions",
+ "MetricExpr": "tma_core_bound",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_allocation_restriction",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL@ / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound. The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
- "ScaleUnit": "100%",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
- "DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "tma_backend_bound",
- "MetricGroup": "Default;TopdownL1;tma_L1_group",
- "MetricName": "tma_backend_bound_aux",
- "MetricThreshold": "tma_backend_bound_aux > 0.2",
- "MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "(tma_info_core_slots - (cpu_atom@TOPDOWN_FE_BOUND.ALL@ + cpu_atom@TOPDOWN_BE_BOUND.ALL@ + cpu_atom@TOPDOWN_RETIRING.ALL@)) / tma_info_core_slots",
+ "MetricExpr": "(5 * cpu_atom@CPU_CLK_UNHALTED.CORE@ - (cpu_atom@TOPDOWN_FE_BOUND.ALL@ + cpu_atom@TOPDOWN_BE_BOUND.ALL@ + cpu_atom@TOPDOWN_RETIRING.ALL@)) / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
@@ -158,644 +146,564 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of uops that are not from the microsequencer.",
- "MetricExpr": "(cpu_atom@TOPDOWN_RETIRING.ALL@ - cpu_atom@UOPS_RETIRED.MS@) / tma_info_core_slots",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
- "MetricName": "tma_base",
- "MetricThreshold": "tma_base > 0.6",
- "MetricgroupNoGroup": "TopdownL2",
- "ScaleUnit": "100%",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "MetricThreshold": "tma_branch_detect > 0.05",
- "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.",
- "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
- "MetricThreshold": "tma_branch_mispredicts > 0.05",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
- "MetricThreshold": "tma_branch_resteer > 0.05",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.05",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles due to backend bound stalls that are core execution bound and not attributed to outstanding demand load or store stalls.",
- "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
- "MetricThreshold": "tma_core_bound > 0.1",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
- "MetricThreshold": "tma_decode > 0.05",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to memory disambiguation.",
- "MetricExpr": "tma_nuke * (cpu_atom@MACHINE_CLEARS.DISAMBIGUATION@ / cpu_atom@MACHINE_CLEARS.SLOW@)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_disambiguation",
- "MetricThreshold": "tma_disambiguation > 0.02",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
- "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "Default;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.",
- "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
- "MetricName": "tma_fast_nuke",
- "MetricThreshold": "tma_fast_nuke > 0.05",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
- "MetricName": "tma_fetch_bandwidth",
- "MetricThreshold": "tma_fetch_bandwidth > 0.1",
+ "MetricName": "tma_ifetch_bandwidth",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
- "MetricName": "tma_fetch_latency",
- "MetricThreshold": "tma_fetch_latency > 0.15",
+ "MetricName": "tma_ifetch_latency",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to FP assists.",
- "MetricExpr": "tma_nuke * (cpu_atom@MACHINE_CLEARS.FP_ASSIST@ / cpu_atom@MACHINE_CLEARS.SLOW@)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_fp_assist",
- "MetricThreshold": "tma_fp_assist > 0.02",
- "ScaleUnit": "100%",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of floating point divide operations per uop.",
- "MetricExpr": "cpu_atom@UOPS_RETIRED.FPDIV@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
- "MetricName": "tma_fpdiv_uops",
- "MetricThreshold": "tma_fpdiv_uops > 0.2",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
+ "MetricExpr": "100 * (cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ + cpu_atom@LD_HEAD.PGWALK_AT_RET@) / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
- "DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL@ / tma_info_core_slots",
- "MetricGroup": "Default;TopdownL1;tma_L1_group",
- "MetricName": "tma_frontend_bound",
- "MetricThreshold": "tma_frontend_bound > 0.2",
- "MetricgroupNoGroup": "TopdownL1;Default",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.IFETCH@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Ifetch",
+ "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
- "MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.LOAD@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "",
- "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@",
- "MetricName": "tma_info_core_clks",
+ "BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.ANY_AT_RET@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Mem_Exec",
+ "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "",
- "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@",
- "MetricName": "tma_info_core_clks_p",
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricName": "tma_info_br_inst_mix_ipbranch",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Cycles Per Instruction",
- "MetricExpr": "tma_info_core_clks / INST_RETIRED.ANY",
- "MetricName": "tma_info_core_cpi",
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.CALL@",
+ "MetricName": "tma_info_br_inst_mix_ipcall",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions Per Cycle",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / tma_info_core_clks",
- "MetricName": "tma_info_core_ipc",
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.FAR_BRANCH@u",
+ "MetricName": "tma_info_br_inst_mix_ipfarbranch",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "",
- "MetricExpr": "5 * tma_info_core_clks",
- "MetricName": "tma_info_core_slots",
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_ntaken",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "cpu_atom@UOPS_RETIRED.ALL@ / INST_RETIRED.ANY",
- "MetricName": "tma_info_core_upi",
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.COND_TAKEN@",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_taken",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percent of instruction miss cost that hit in DRAM",
- "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.IFETCH_DRAM_HIT@ / cpu_atom@MEM_BOUND_STALLS.IFETCH@",
- "MetricName": "tma_info_frontend_inst_miss_cost_dramhit_percent",
+ "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.INDIRECT@",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_indirect",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percent of instruction miss cost that hit in the L2",
- "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.IFETCH_L2_HIT@ / cpu_atom@MEM_BOUND_STALLS.IFETCH@",
- "MetricName": "tma_info_frontend_inst_miss_cost_l2hit_percent",
+ "BriefDescription": "Instructions per retired return Branch Misprediction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.RETURN@",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_ret",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percent of instruction miss cost that hit in the L3",
- "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.IFETCH_LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS.IFETCH@",
- "MetricName": "tma_info_frontend_inst_miss_cost_l3hit_percent",
+ "BriefDescription": "Instructions per retired Branch Misprediction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@",
+ "MetricName": "tma_info_br_inst_mix_ipmispredict",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Ratio of all branches which mispredict",
- "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricName": "tma_info_inst_mix_branch_mispredict_ratio",
+ "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / cpu_atom@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_ratio",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Ratio between Mispredicted branches and unknown branches",
- "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / BACLEARS.ANY",
- "MetricName": "tma_info_inst_mix_branch_mispredict_to_unknown_branch_ratio",
+ "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / cpu_atom@BACLEARS.ANY@",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_to_unknown_branch_ratio",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percentage of all uops which are FPDiv uops",
- "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.FPDIV@ / UOPS_RETIRED.ALL",
- "MetricName": "tma_info_inst_mix_fpdiv_uop_ratio",
+ "BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
+ "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.LD_BUF@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percentage of all uops which are IDiv uops",
- "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.IDIV@ / UOPS_RETIRED.ALL",
- "MetricName": "tma_info_inst_mix_idiv_uop_ratio",
+ "BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
+ "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.RSV@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricName": "tma_info_inst_mix_ipbranch",
+ "BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
+ "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.ST_BUF@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_INST_RETIRED.CALL",
- "MetricName": "tma_info_inst_mix_ipcall",
+ "BriefDescription": "Cycles Per Instruction",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_core_cpi",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per Far Branch",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_INST_RETIRED.FAR_BRANCH@ / 2)",
- "MetricName": "tma_info_inst_mix_ipfarbranch",
+ "BriefDescription": "Instructions Per Cycle",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_core_ipc",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per Load",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_inst_mix_ipload",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "cpu_atom@UOPS_RETIRED.ALL@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_core_upi",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)",
- "MetricName": "tma_info_inst_mix_ipmisp_cond_ntaken",
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.IFETCH_L2_HIT@ / cpu_atom@MEM_BOUND_STALLS.IFETCH@",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_TAKEN",
- "MetricName": "tma_info_inst_mix_ipmisp_cond_taken",
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.IFETCH_LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS.IFETCH@",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.INDIRECT",
- "MetricName": "tma_info_inst_mix_ipmisp_indirect",
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.IFETCH_DRAM_HIT@ / cpu_atom@MEM_BOUND_STALLS.IFETCH@",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per retired return Branch Misprediction",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.RETURN",
- "MetricName": "tma_info_inst_mix_ipmisp_ret",
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per retired Branch Misprediction",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricName": "tma_info_inst_mix_ipmispredict",
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per Store",
- "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / MEM_UOPS_RETIRED.ALL_STORES",
- "MetricName": "tma_info_inst_mix_ipstore",
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percentage of all uops which are ucode ops",
- "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.MS@ / UOPS_RETIRED.ALL",
- "MetricName": "tma_info_inst_mix_microcode_uop_ratio",
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a pipeline block",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_l1_bound",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percentage of all uops which are x87 uops",
- "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.X87@ / UOPS_RETIRED.ALL",
- "MetricName": "tma_info_inst_mix_x87_uop_ratio",
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement",
+ "MetricExpr": "100 * (cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ + cpu_atom@MEM_BOUND_STALLS.LOAD@) / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_load_bound",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percentage of total non-speculative loads with a address aliasing block",
- "MetricExpr": "100 * cpu_atom@LD_BLOCKS.4K_ALIAS@ / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_l1_bound_address_alias_blocks",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full",
+ "MetricExpr": "100 * (cpu_atom@MEM_SCHEDULER_BLOCK.ST_BUF@ / cpu_atom@MEM_SCHEDULER_BLOCK.ALL@) * tma_mem_scheduler",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_store_bound",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percentage of total non-speculative loads that are splits",
- "MetricExpr": "100 * cpu_atom@MEM_UOPS_RETIRED.SPLIT_LOADS@ / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_l1_bound_load_splits",
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to memory disambiguation",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.DISAMBIGUATION@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_disamb_pki",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
- "MetricExpr": "100 * cpu_atom@LD_BLOCKS.DATA_UNKNOWN@ / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_l1_bound_store_fwd_blocks",
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to floating point assists",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.FP_ASSIST@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_fp_assist_pki",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Cycle cost per DRAM hit",
- "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
- "MetricName": "tma_info_memory_cycles_per_demand_load_dram_hit",
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to memory ordering",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_monuke_pki",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Cycle cost per L2 hit",
- "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "MetricName": "tma_info_memory_cycles_per_demand_load_l2_hit",
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to memory renaming",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.MRN_NUKE@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_mrn_pki",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Cycle cost per LLC hit",
- "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "MetricName": "tma_info_memory_cycles_per_demand_load_l3_hit",
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to page faults",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.PAGE_FAULT@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_page_fault_pki",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "load ops retired per 1000 instruction",
- "MetricExpr": "1e3 * cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@ / INST_RETIRED.ANY",
- "MetricName": "tma_info_memory_memloadpki",
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to self-modifying code",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.SMC@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_smc_pki",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
- "MetricName": "tma_info_system_cpu_utilization",
+ "BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
+ "MetricExpr": "100 * cpu_atom@LD_BLOCKS.4K_ALIAS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@k / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Summary",
- "MetricName": "tma_info_system_kernel_utilization",
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "100 * cpu_atom@LD_BLOCKS.DATA_UNKNOWN@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "tma_info_core_clks / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "tma_info_system_turbo_utilization",
+ "BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.L1_MISS_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
- "MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.OTHER_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.",
- "MetricExpr": "cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ / tma_info_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.PGWALK_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
- "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.1",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
- "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.1",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.ST_ADDR_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to load buffer full",
- "MetricExpr": "tma_mem_scheduler * cpu_atom@MEM_SCHEDULER_BLOCK.LD_BUF@ / MEM_SCHEDULER_BLOCK.ALL",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
- "MetricName": "tma_ld_buffer",
- "MetricThreshold": "tma_ld_buffer > 0.05",
- "ScaleUnit": "100%",
+ "BriefDescription": "Instructions per Load",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_mix_ipload",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
- "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / tma_info_core_slots",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
- "MetricName": "tma_machine_clears",
- "MetricThreshold": "tma_machine_clears > 0.05",
- "MetricgroupNoGroup": "TopdownL2",
- "ScaleUnit": "100%",
+ "BriefDescription": "Instructions per Store",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@MEM_UOPS_RETIRED.ALL_STORES@",
+ "MetricName": "tma_info_mem_mix_ipstore",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
- "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
- "MetricName": "tma_mem_scheduler",
- "MetricThreshold": "tma_mem_scheduler > 0.1",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of total non-speculative loads that perform one or more locks",
+ "MetricExpr": "100 * cpu_atom@MEM_UOPS_RETIRED.LOCK_LOADS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_mix_load_locks_ratio",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to stores or loads.",
- "MetricExpr": "min(tma_backend_bound, cpu_atom@LD_HEAD.ANY_AT_RET@ / tma_info_core_clks + tma_store_bound)",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
- "MetricName": "tma_memory_bound",
- "MetricThreshold": "tma_memory_bound > 0.2",
- "MetricgroupNoGroup": "TopdownL2",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of total non-speculative loads that are splits",
+ "MetricExpr": "100 * cpu_atom@MEM_UOPS_RETIRED.SPLIT_LOADS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_mix_load_splits_ratio",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to memory ordering.",
- "MetricExpr": "tma_nuke * (cpu_atom@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_atom@MACHINE_CLEARS.SLOW@)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_memory_ordering",
- "MetricThreshold": "tma_memory_ordering > 0.02",
- "ScaleUnit": "100%",
+ "BriefDescription": "Ratio of mem load uops to all uops",
+ "MetricExpr": "1e3 * cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@ / cpu_atom@UOPS_RETIRED.ALL@",
+ "MetricName": "tma_info_mem_mix_memload_ratio",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS)",
- "MetricExpr": "cpu_atom@UOPS_RETIRED.MS@ / tma_info_core_slots",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
- "MetricName": "tma_ms_uops",
- "MetricThreshold": "tma_ms_uops > 0.05",
- "MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
+ "MetricExpr": "100 * cpu_atom@SERIALIZATION.C01_MS_SCB@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricName": "tma_info_serialization _%_tpause_cycles",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
- "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
- "MetricName": "tma_non_mem_scheduler",
- "MetricThreshold": "tma_non_mem_scheduler > 0.1",
- "ScaleUnit": "100%",
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricName": "tma_info_system_cpu_utilization",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear (slow nuke).",
- "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
- "MetricName": "tma_nuke",
- "MetricThreshold": "tma_nuke > 0.05",
- "ScaleUnit": "100%",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@k / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_kernel_utilization",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
- "MetricName": "tma_other_fb",
- "MetricThreshold": "tma_other_fb > 0.05",
- "ScaleUnit": "100%",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@ / cpu_atom@CPU_CLK_UNHALTED.REF_TSC@",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_turbo_utilization",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.",
- "MetricExpr": "cpu_atom@LD_HEAD.OTHER_AT_RET@ / tma_info_core_clks",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
- "MetricName": "tma_other_l1",
- "MetricThreshold": "tma_other_l1 > 0.05",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of all uops which are FPDiv uops",
+ "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.FPDIV@ / cpu_atom@UOPS_RETIRED.ALL@",
+ "MetricName": "tma_info_uop_mix_fpdiv_uop_ratio",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hits in the L2, LLC, DRAM or MMIO (Non-DRAM) but could not be correctly attributed or cycles in which the load miss is waiting on a request buffer.",
- "MetricExpr": "max(0, tma_memory_bound - (tma_store_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound))",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_other_load_store",
- "MetricThreshold": "tma_other_load_store > 0.1",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of all uops which are IDiv uops",
+ "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.IDIV@ / cpu_atom@UOPS_RETIRED.ALL@",
+ "MetricName": "tma_info_uop_mix_idiv_uop_ratio",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of uops retired excluding ms and fp div uops.",
- "MetricExpr": "(cpu_atom@TOPDOWN_RETIRING.ALL@ - cpu_atom@UOPS_RETIRED.MS@ - cpu_atom@UOPS_RETIRED.FPDIV@) / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
- "MetricName": "tma_other_ret",
- "MetricThreshold": "tma_other_ret > 0.3",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of all uops which are microcode ops",
+ "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.MS@ / cpu_atom@UOPS_RETIRED.ALL@",
+ "MetricName": "tma_info_uop_mix_microcode_uop_ratio",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to page faults.",
- "MetricExpr": "tma_nuke * (cpu_atom@MACHINE_CLEARS.PAGE_FAULT@ / cpu_atom@MACHINE_CLEARS.SLOW@)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_page_fault",
- "MetricThreshold": "tma_page_fault > 0.02",
- "ScaleUnit": "100%",
+ "BriefDescription": "Percentage of all uops which are x87 uops",
+ "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.X87@ / cpu_atom@UOPS_RETIRED.ALL@",
+ "MetricName": "tma_info_uop_mix_x87_uop_ratio",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
- "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
- "MetricName": "tma_predecode",
- "MetricThreshold": "tma_predecode > 0.05",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
- "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
- "MetricName": "tma_register",
- "MetricThreshold": "tma_register > 0.1",
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
- "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
- "MetricName": "tma_reorder_buffer",
- "MetricThreshold": "tma_reorder_buffer > 0.1",
- "ScaleUnit": "100%",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
- "MetricExpr": "tma_backend_bound",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_aux_group",
- "MetricName": "tma_resource_bound",
- "MetricThreshold": "tma_resource_bound > 0.2",
- "MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count.",
+ "MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that result in retirement slots.",
- "DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL@ / tma_info_core_slots",
- "MetricGroup": "Default;TopdownL1;tma_L1_group",
- "MetricName": "tma_retiring",
- "MetricThreshold": "tma_retiring > 0.75",
- "MetricgroupNoGroup": "TopdownL1;Default",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to RSV full relative",
- "MetricExpr": "tma_mem_scheduler * cpu_atom@MEM_SCHEDULER_BLOCK.RSV@ / MEM_SCHEDULER_BLOCK.ALL",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
- "MetricName": "tma_rsv",
- "MetricThreshold": "tma_rsv > 0.05",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
- "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
- "MetricName": "tma_serialization",
- "MetricThreshold": "tma_serialization > 0.1",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to SMC.",
- "MetricExpr": "tma_nuke * (cpu_atom@MACHINE_CLEARS.SMC@ / cpu_atom@MACHINE_CLEARS.SLOW@)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_smc",
- "MetricThreshold": "tma_smc > 0.02",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to store buffer full",
- "MetricExpr": "tma_store_bound",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
- "MetricName": "tma_st_buffer",
- "MetricThreshold": "tma_st_buffer > 0.05",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.",
- "MetricExpr": "cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ / tma_info_core_clks",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
- "MetricName": "tma_stlb_hit",
- "MetricThreshold": "tma_stlb_hit > 0.05",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a second level TLB miss requiring a page walk.",
- "MetricExpr": "cpu_atom@LD_HEAD.PGWALK_AT_RET@ / tma_info_core_clks",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
- "MetricName": "tma_stlb_miss",
- "MetricThreshold": "tma_stlb_miss > 0.05",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation",
+ "MetricExpr": "tma_backend_bound - tma_core_bound",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full.",
- "MetricExpr": "tma_mem_scheduler * (cpu_atom@MEM_SCHEDULER_BLOCK.ST_BUF@ / cpu_atom@MEM_SCHEDULER_BLOCK.ALL@)",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.1",
+ "BriefDescription": "Counts the number of issue slots that result in retirement slots",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "Default;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
+ "MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
- "MetricExpr": "cpu_atom@LD_HEAD.ST_ADDR_AT_RET@ / tma_info_core_clks",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
- "MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.05",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -818,7 +726,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "78 * cpu_core@ASSISTS.ANY@ / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
@@ -838,7 +746,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "cpu_core@topdown\\-be\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) + 0 * tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -861,7 +769,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricExpr": "cpu_core@topdown\\-br\\-mispredict@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) + 0 * tma_info_thread_slots",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -920,7 +828,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricExpr": "(25 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) + 24 * tma_info_system_core_frequency * cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -941,7 +849,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricExpr": "24 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -961,7 +869,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -994,14 +902,14 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "min(7 * cpu_core@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@, max(cpu_core@CYCLE_ACTIVITY.CYCLES_MEM_ANY@ - cpu_core@MEMORY_ACTIVITY.CYCLES_L1D_MISS@, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -1011,7 +919,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(7 * cpu_core@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -1021,7 +929,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "28 * tma_info_system_core_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -1031,7 +939,7 @@
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricExpr": "cpu_core@L1D_PEND_MISS.FB_FULL@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -1045,7 +953,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1092,7 +1000,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -1102,7 +1010,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -1134,7 +1042,7 @@
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "cpu_core@topdown\\-fe\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
- "MetricGroup": "Default;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -1145,7 +1053,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.MACRO_FUSED@ / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
@@ -1166,7 +1074,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -1183,7 +1091,7 @@
},
{
"BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_NTAKEN@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200",
@@ -1191,7 +1099,7 @@
},
{
"BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200",
@@ -1199,7 +1107,7 @@
},
{
"BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.INDIRECT",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.INDIRECT@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
"MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3",
@@ -1207,7 +1115,7 @@
},
{
"BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.RET",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.RET@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
"MetricThreshold": "tma_info_bad_spec_ipmisp_ret < 500",
@@ -1215,7 +1123,7 @@
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@",
"MetricGroup": "Bad;BadSpec;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmispredict",
"MetricThreshold": "tma_info_bad_spec_ipmispredict < 200",
@@ -1237,12 +1145,21 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"Unit": "cpu_core"
},
{
@@ -1255,33 +1172,26 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + cpu_core@BR_INST_RETIRED.NEAR_CALL@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + cpu_core@BR_INST_RETIRED.NEAR_CALL@) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
"MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full",
@@ -1289,8 +1199,8 @@
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency",
@@ -1299,16 +1209,16 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: ",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20",
"Unit": "cpu_core"
@@ -1316,7 +1226,7 @@
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY\\,umask\\=1@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches",
@@ -1324,8 +1234,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization",
@@ -1334,7 +1244,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs",
@@ -1343,45 +1253,53 @@
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.",
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
- "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@BR_INST_RETIRED.NEAR_RETURN@) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@BR_INST_RETIRED.NEAR_RETURN@) / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
"MetricGroup": "Bad;Branches",
"MetricName": "tma_info_branches_callret",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of branches that are non-taken conditionals",
- "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_NTAKEN@ / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_NTAKEN@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
"MetricName": "tma_info_branches_cond_nt",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of branches that are taken conditionals",
- "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_TAKEN@ / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_TAKEN@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
"MetricName": "tma_info_branches_cond_tk",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
- "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_TAKEN@ - cpu_core@BR_INST_RETIRED.COND_TAKEN@ - 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_TAKEN@ - cpu_core@BR_INST_RETIRED.COND_TAKEN@ - 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@) / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
"MetricGroup": "Bad;Branches",
"MetricName": "tma_info_branches_jump",
"Unit": "cpu_core"
@@ -1442,7 +1360,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 6 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp",
"Unit": "cpu_core"
},
{
@@ -1468,7 +1386,7 @@
},
{
"BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@",
"MetricGroup": "DSBmiss;Fed",
"MetricName": "tma_info_frontend_ipdsb_miss_ret",
"MetricThreshold": "tma_info_frontend_ipdsb_miss_ret < 50",
@@ -1476,21 +1394,21 @@
},
{
"BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
- "MetricExpr": "tma_info_inst_mix_instructions / BACLEARS.ANY",
+ "MetricExpr": "tma_info_inst_mix_instructions / cpu_core@BACLEARS.ANY@",
"MetricGroup": "Fed",
"MetricName": "tma_info_frontend_ipunknown_branch",
"Unit": "cpu_core"
},
{
"BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
- "MetricExpr": "1e3 * cpu_core@FRONTEND_RETIRED.L2_MISS@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@FRONTEND_RETIRED.L2_MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "IcMiss",
"MetricName": "tma_info_frontend_l2mpki_code",
"Unit": "cpu_core"
},
{
"BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
- "MetricExpr": "1e3 * cpu_core@L2_RQSTS.CODE_RD_MISS@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.CODE_RD_MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "IcMiss",
"MetricName": "tma_info_frontend_l2mpki_code_all",
"Unit": "cpu_core"
@@ -1512,7 +1430,7 @@
},
{
"BriefDescription": "Branch instructions per taken branch.",
- "MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch",
"Unit": "cpu_core"
@@ -1527,7 +1445,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + cpu_core@FP_ARITH_INST_RETIRED.VECTOR@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -1554,7 +1472,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
@@ -1563,7 +1481,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
@@ -1572,7 +1490,7 @@
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
"MetricGroup": "Branches;Fed;InsType",
"MetricName": "tma_info_inst_mix_ipbranch",
"MetricThreshold": "tma_info_inst_mix_ipbranch < 8",
@@ -1580,7 +1498,7 @@
},
{
"BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.NEAR_CALL",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_CALL@",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_ipcall",
"MetricThreshold": "tma_info_inst_mix_ipcall < 200",
@@ -1596,7 +1514,7 @@
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@MEM_INST_RETIRED.ALL_LOADS@",
"MetricGroup": "InsType",
"MetricName": "tma_info_inst_mix_ipload",
"MetricThreshold": "tma_info_inst_mix_ipload < 3",
@@ -1604,14 +1522,14 @@
},
{
"BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)",
- "MetricExpr": "tma_info_inst_mix_instructions / CPU_CLK_UNHALTED.PAUSE_INST",
+ "MetricExpr": "tma_info_inst_mix_instructions / cpu_core@CPU_CLK_UNHALTED.PAUSE_INST@",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_ippause",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / MEM_INST_RETIRED.ALL_STORES",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@",
"MetricGroup": "InsType",
"MetricName": "tma_info_inst_mix_ipstore",
"MetricThreshold": "tma_info_inst_mix_ipstore < 8",
@@ -1626,12 +1544,12 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instructions per taken branch",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 13",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp",
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp",
"Unit": "cpu_core"
},
{
@@ -1664,13 +1582,13 @@
},
{
"BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
- "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_fb_hpki",
"Unit": "cpu_core"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * cpu_core@L1D.REPLACEMENT@ / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw",
@@ -1678,20 +1596,20 @@
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_l1mpki",
"Unit": "cpu_core"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
- "MetricExpr": "1e3 * cpu_core@L2_RQSTS.ALL_DEMAND_DATA_RD@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.ALL_DEMAND_DATA_RD@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_l1mpki_load",
"Unit": "cpu_core"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * cpu_core@L2_LINES_IN.ALL@ / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw",
@@ -1699,48 +1617,55 @@
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1e3 * (cpu_core@L2_RQSTS.REFERENCES@ - cpu_core@L2_RQSTS.MISS@) / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * (cpu_core@L2_RQSTS.REFERENCES@ - cpu_core@L2_RQSTS.MISS@) / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_l2hpki_all",
"Unit": "cpu_core"
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
- "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_HIT@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_HIT@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_l2hpki_load",
"Unit": "cpu_core"
},
{
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L2_MISS@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L2_MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "Backend;CacheHits;Mem",
"MetricName": "tma_info_memory_l2mpki",
"Unit": "cpu_core"
},
{
"BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1e3 * cpu_core@L2_RQSTS.MISS@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem;Offcore",
"MetricName": "tma_info_memory_l2mpki_all",
"Unit": "cpu_core"
},
{
"BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
- "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_MISS@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_l2mpki_load",
"Unit": "cpu_core"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.RFO_MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * cpu_core@OFFCORE_REQUESTS.ALL_REQUESTS@ / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
"MetricName": "tma_info_memory_l3_cache_access_bw",
"Unit": "cpu_core"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * cpu_core@LONGEST_LAT_CACHE.MISS@ / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw",
@@ -1748,21 +1673,21 @@
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L3_MISS@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L3_MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_l3mpki",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average Parallel L2 cache miss data reads",
- "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD@ / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_data_l2_mlp",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS.DEMAND_DATA_RD@",
"MetricGroup": "Memory_Lat;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_miss_latency",
"Unit": "cpu_core"
@@ -1776,35 +1701,35 @@
},
{
"BriefDescription": "Average Latency for L3 cache miss demand Loads",
- "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD@",
"MetricGroup": "Memory_Lat;Offcore",
"MetricName": "tma_info_memory_latency_load_l3_miss_latency",
"Unit": "cpu_core"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
- "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / cpu_core@MEM_LOAD_COMPLETED.L1_MISS_ANY@",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
"MetricName": "tma_info_memory_load_miss_real_latency",
"Unit": "cpu_core"
},
{
"BriefDescription": "\"Bus lock\" per kilo instruction",
- "MetricExpr": "1e3 * cpu_core@SQ_MISC.BUS_LOCK@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@SQ_MISC.BUS_LOCK@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_mix_bus_lock_pki",
"Unit": "cpu_core"
},
{
"BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "1e3 * cpu_core@MEM_LOAD_MISC_RETIRED.UC@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_MISC_RETIRED.UC@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_mix_uc_load_pki",
"Unit": "cpu_core"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
- "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / cpu_core@L1D_PEND_MISS.PENDING_CYCLES@",
"MetricGroup": "Mem;MemoryBW;MemoryBound",
"MetricName": "tma_info_memory_mlp",
"PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -1812,14 +1737,14 @@
},
{
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "1e3 * cpu_core@ITLB_MISSES.WALK_COMPLETED@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@ITLB_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "Fed;MemoryTLB",
"MetricName": "tma_info_memory_tlb_code_stlb_mpki",
"Unit": "cpu_core"
},
{
"BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "1e3 * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_load_stlb_mpki",
"Unit": "cpu_core"
@@ -1834,21 +1759,42 @@
},
{
"BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "1e3 * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED@ / INST_RETIRED.ANY",
+ "MetricExpr": "1e3 * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_store_stlb_mpki",
"Unit": "cpu_core"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "cpu_core@IDQ.DSB_UOPS@ / cpu_core@IDQ.DSB_CYCLES_ANY@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from LSD per cycle",
+ "MetricExpr": "cpu_core@LSD.UOPS@ / cpu_core@LSD.CYCLES_ACTIVE@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_lsd",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "cpu_core@IDQ.MITE_UOPS@ / cpu_core@IDQ.MITE_CYCLES_ANY@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / ASSISTS.ANY",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY@",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
"MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
@@ -1887,14 +1833,14 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized",
"Unit": "cpu_core"
@@ -1925,14 +1871,14 @@
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / cpu_core@INST_RETIRED.ANY_P@k",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@INST_RETIRED.ANY_P@k",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_cpi",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_utilization",
"MetricThreshold": "tma_info_system_kernel_utilization > 0.05",
@@ -1971,7 +1917,7 @@
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "tma_info_thread_clks / cpu_core@CPU_CLK_UNHALTED.REF_TSC@",
"MetricGroup": "Power",
"MetricName": "tma_info_system_turbo_utilization",
"Unit": "cpu_core"
@@ -1992,7 +1938,7 @@
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
- "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / UOPS_ISSUED.ANY",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_ISSUED.ANY@",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
"PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
@@ -2021,15 +1967,15 @@
},
{
"BriefDescription": "Uops Per Instruction",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / INST_RETIRED.ANY",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "Pipeline;Ret;Retire",
"MetricName": "tma_info_thread_uoppi",
"MetricThreshold": "tma_info_thread_uoppi > 1.05",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Uops per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
"MetricThreshold": "tma_info_thread_uptb < 9",
@@ -2068,7 +2014,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -2086,9 +2032,19 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (cpu_core@MEM_INST_RETIRED.ALL_LOADS@ - cpu_core@MEM_LOAD_RETIRED.FB_HIT@ - cpu_core@MEM_LOAD_RETIRED.L1_MISS@) * 20 / 100, max(cpu_core@CYCLE_ACTIVITY.CYCLES_MEM_ANY@ - cpu_core@MEMORY_ACTIVITY.CYCLES_L1D_MISS@, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -2108,7 +2064,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricExpr": "9 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -2121,7 +2077,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2170,7 +2126,7 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2187,7 +2143,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -2198,7 +2154,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -2208,7 +2164,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -2258,7 +2214,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -2298,7 +2254,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
"MetricExpr": "tma_light_operations * (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ - cpu_core@INST_RETIRED.MACRO_FUSED@) / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
@@ -2308,7 +2264,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.NOP@ / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -2328,7 +2284,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / (cpu_core@INT_MISC.CLEARS_COUNT@ - cpu_core@MACHINE_CLEARS.COUNT@)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
@@ -2337,7 +2293,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - cpu_core@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_core@MACHINE_CLEARS.COUNT@), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
@@ -2395,7 +2351,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu_core@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + cpu_core@RS.EMPTY\\,umask\\=1@) / tma_info_thread_clks * (cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ + max(cpu_core@RS.EMPTY\\,umask\\=1@ - cpu_core@RESOURCE_STALLS.SCOREBOARD@, 0)) / tma_info_thread_clks * (cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -2428,7 +2384,7 @@
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
@@ -2439,7 +2395,7 @@
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "cpu_core@topdown\\-retiring@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) + 0 * tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -2450,7 +2406,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks + tma_c02_wait",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -2501,7 +2457,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(cpu_core@XQ.FULL_CYCLES@ + cpu_core@L1D_PEND_MISS.L2_STALLS@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -2531,7 +2487,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricExpr": "(cpu_core@MEM_STORE_RETIRED.L2_HIT@ * 10 * (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) + (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) * min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -2579,7 +2535,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
@@ -2588,7 +2544,7 @@
},
{
"BriefDescription": "This metric serves as an approximation of legacy x87 usage",
- "MetricExpr": "tma_retiring * cpu_core@UOPS_EXECUTED.X87@ / UOPS_EXECUTED.THREAD",
+ "MetricExpr": "tma_retiring * cpu_core@UOPS_EXECUTED.X87@ / cpu_core@UOPS_EXECUTED.THREAD@",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
"MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/cache.json b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
index b3d7f8fb50df..3f51686fe7a8 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D.HWPF_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.HWPF_MISS",
"SampleAfterValue": "1000003",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x48",
@@ -38,6 +42,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event L1D_PEND_MISS.L2_STALLS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALL",
@@ -47,6 +52,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALLS",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -56,6 +62,7 @@
},
{
"BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -65,6 +72,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -75,6 +83,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -84,6 +93,7 @@
},
{
"BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.USELESS_HWPF",
"PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
@@ -93,6 +103,7 @@
},
{
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
@@ -102,6 +113,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_RQSTS.MISS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
@@ -111,6 +123,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -120,6 +133,7 @@
},
{
"BriefDescription": "Demand Data Read access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -129,6 +143,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Counts demand requests that miss L2 cache.",
@@ -138,6 +153,7 @@
},
{
"BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_HWPF",
"SampleAfterValue": "200003",
@@ -146,6 +162,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -155,6 +172,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -164,6 +182,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -173,6 +192,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
@@ -182,6 +202,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -191,6 +212,7 @@
},
{
"BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.HWPF_MISS",
"SampleAfterValue": "200003",
@@ -199,6 +221,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_REQUEST.MISS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
@@ -208,6 +231,7 @@
},
{
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
@@ -217,6 +241,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -226,6 +251,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -235,6 +261,7 @@
},
{
"BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_HIT",
"PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -244,6 +271,7 @@
},
{
"BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_MISS",
"PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -252,16 +280,28 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x41",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -271,15 +311,17 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x4f",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -289,6 +331,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
@@ -298,6 +341,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
@@ -307,6 +351,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
@@ -316,6 +361,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
@@ -325,6 +371,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD",
"SampleAfterValue": "200003",
@@ -333,6 +380,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
"SampleAfterValue": "200003",
@@ -341,6 +389,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
"SampleAfterValue": "200003",
@@ -349,6 +398,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the LLC or other core with HITE/F/M.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
@@ -358,6 +408,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -369,6 +420,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -380,6 +432,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -391,6 +444,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -402,6 +456,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -413,6 +468,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -424,6 +480,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -435,6 +492,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -446,6 +504,7 @@
},
{
"BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
"PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
@@ -455,6 +514,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
@@ -466,6 +526,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
@@ -477,6 +538,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
@@ -488,6 +550,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -499,6 +562,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -510,6 +574,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
@@ -521,6 +586,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
@@ -532,6 +598,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -543,6 +610,7 @@
},
{
"BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -554,6 +622,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -565,6 +634,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -576,6 +646,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -587,6 +658,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -598,6 +670,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -609,6 +682,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -620,6 +694,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
@@ -630,6 +705,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
@@ -640,6 +716,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
@@ -650,6 +727,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ALL",
"SampleAfterValue": "20003",
@@ -658,6 +736,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
"SampleAfterValue": "20003",
@@ -666,6 +745,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.RSV",
"SampleAfterValue": "20003",
@@ -674,6 +754,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
"SampleAfterValue": "20003",
@@ -682,6 +763,7 @@
},
{
"BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "MEM_STORE_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
@@ -690,6 +772,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -701,6 +784,7 @@
},
{
"BriefDescription": "Counts the number of store uops retired.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -712,6 +796,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
@@ -725,6 +810,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
@@ -738,6 +824,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
@@ -751,6 +838,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
@@ -764,6 +852,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
@@ -777,6 +866,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
@@ -790,6 +880,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
@@ -803,6 +894,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
@@ -815,7 +907,19 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of load uops retired that performed one or more locks.",
+ "Counter": "0,1,2,3,4,5",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of retired split load uops.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -826,6 +930,7 @@
},
{
"BriefDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
@@ -837,6 +942,7 @@
},
{
"BriefDescription": "Retired memory uops for any access",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe5",
"EventName": "MEM_UOP_RETIRED.ANY",
"PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
@@ -846,6 +952,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -856,6 +963,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -866,6 +974,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -876,6 +985,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -886,6 +996,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -896,6 +1007,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -906,6 +1018,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -916,6 +1029,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -926,6 +1040,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -936,6 +1051,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"SampleAfterValue": "100003",
@@ -944,6 +1060,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -952,7 +1069,18 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cacheable and noncacheable code read requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -961,7 +1089,18 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"Errata": "ADL038",
"EventCode": "0x20",
@@ -972,6 +1111,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "ADL038",
"EventCode": "0x20",
@@ -981,7 +1121,19 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -991,6 +1143,7 @@
},
{
"BriefDescription": "For every cycle where the core is waiting on at least 1 outstanding Demand RFO request, increments by 1.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -1001,6 +1154,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Counter": "0,1,2,3",
"Errata": "ADL038",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
@@ -1009,7 +1163,18 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -1019,6 +1184,7 @@
},
{
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "SQ_MISC.BUS_LOCK",
"PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
@@ -1027,7 +1193,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
@@ -1037,6 +1213,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
@@ -1046,6 +1223,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.T0",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
@@ -1055,6 +1233,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
@@ -1064,6 +1243,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json b/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json
index cd291943dc08..b4621c221f58 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ARITH.FPDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.FPDIV_ACTIVE",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.FP",
"PublicDescription": "Counts all microcode Floating Point assists.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.SSE_AVX_MIX",
"SampleAfterValue": "1000003",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"SampleAfterValue": "2000003",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"SampleAfterValue": "2000003",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"SampleAfterValue": "2000003",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V0",
"SampleAfterValue": "2000003",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V1",
"SampleAfterValue": "2000003",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V2",
"SampleAfterValue": "2000003",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -102,6 +114,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -111,6 +124,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -120,6 +134,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -129,6 +144,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -138,6 +154,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -147,6 +164,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -156,6 +174,7 @@
},
{
"BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
@@ -165,6 +184,7 @@
},
{
"BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
index 542ba4a81996..66735a612ebd 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Clears due to Unknown Branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.MS_BUSY",
"SampleAfterValue": "500009",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -57,6 +63,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -69,6 +76,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -81,6 +89,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -93,6 +102,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -105,6 +115,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -117,6 +128,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -129,6 +141,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -141,6 +154,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -153,6 +167,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -165,6 +180,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -177,6 +193,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -189,6 +206,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -201,6 +219,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -213,6 +232,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -225,6 +245,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -237,6 +258,7 @@
},
{
"BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
@@ -248,6 +270,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -260,6 +283,7 @@
},
{
"BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
@@ -271,6 +295,7 @@
},
{
"BriefDescription": "Counts the number of requests to the instruction cache for one or more bytes of a cache line.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
@@ -280,6 +305,7 @@
},
{
"BriefDescription": "Counts the number of instruction cache misses.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
@@ -289,6 +315,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_DATA.STALLS",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
@@ -297,7 +324,19 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "ICACHE_DATA.STALL_PERIODS",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALL_PERIODS",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
@@ -307,6 +346,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -317,16 +357,18 @@
},
{
"BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x8",
"Unit": "cpu_core"
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
@@ -336,6 +378,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_ANY",
@@ -346,6 +389,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_OK",
@@ -356,6 +400,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -365,6 +410,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES_ANY",
@@ -375,6 +421,7 @@
},
{
"BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -386,6 +433,7 @@
},
{
"BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS).",
@@ -395,6 +443,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
@@ -404,6 +453,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
@@ -414,6 +464,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
@@ -425,6 +476,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CORE]",
@@ -434,6 +486,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -444,6 +497,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/memory.json b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
index 23d36164433f..81a03f53aadc 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.ANY_AT_RET",
"SampleAfterValue": "1000003",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_BOUND_AT_RET",
"SampleAfterValue": "1000003",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_MISS_AT_RET",
"SampleAfterValue": "1000003",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.OTHER_AT_RET",
"PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.PGWALK_AT_RET",
"SampleAfterValue": "1000003",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.ST_ADDR_AT_RET",
"SampleAfterValue": "1000003",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "20003",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
@@ -85,6 +95,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
@@ -94,6 +105,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
@@ -104,6 +116,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "9",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
@@ -113,7 +126,22 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "53",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -127,6 +155,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -140,6 +169,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -153,6 +183,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -166,6 +197,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -179,6 +211,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -192,6 +225,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -205,6 +239,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -218,6 +253,7 @@
},
{
"BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Counter": "0",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
@@ -229,6 +265,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -239,6 +276,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -249,6 +287,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS]",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -259,6 +298,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -269,6 +309,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -279,6 +320,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS]",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -289,6 +331,7 @@
},
{
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -297,6 +340,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json b/tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json
index 7a03835f262c..b54a5fc0861f 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/metricgroups.json
@@ -5,8 +5,21 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"C0Wait": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -22,14 +35,17 @@
"Frontend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"HPC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"IcMiss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Ifetch": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"InsType": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"IntVector": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Load_Store_Miss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Mem_Exec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -60,6 +76,7 @@
"TopdownL4": "Metrics for top-down breakdown at level 4",
"TopdownL5": "Metrics for top-down breakdown at level 5",
"TopdownL6": "Metrics for top-down breakdown at level 6",
+ "load_store_bound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"tma_L1_group": "Metrics for top-down breakdown at level 1",
"tma_L2_group": "Metrics for top-down breakdown at level 2",
"tma_L3_group": "Metrics for top-down breakdown at level 3",
@@ -68,10 +85,8 @@
"tma_L6_group": "Metrics for top-down breakdown at level 6",
"tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category",
"tma_assists_group": "Metrics contributing to tma_assists category",
- "tma_backend_bound_aux_group": "Metrics contributing to tma_backend_bound_aux category",
"tma_backend_bound_group": "Metrics contributing to tma_backend_bound category",
"tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
- "tma_base_group": "Metrics contributing to tma_base category",
"tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category",
"tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category",
"tma_core_bound_group": "Metrics contributing to tma_core_bound category",
@@ -84,6 +99,8 @@
"tma_fp_vector_group": "Metrics contributing to tma_fp_vector category",
"tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category",
"tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category",
+ "tma_ifetch_bandwidth_group": "Metrics contributing to tma_ifetch_bandwidth category",
+ "tma_ifetch_latency_group": "Metrics contributing to tma_ifetch_latency category",
"tma_int_operations_group": "Metrics contributing to tma_int_operations category",
"tma_issue2P": "Metrics related by the issue $issue2P",
"tma_issueBM": "Metrics related by the issue $issueBM",
@@ -110,11 +127,9 @@
"tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category",
"tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
"tma_mem_latency_group": "Metrics contributing to tma_mem_latency category",
- "tma_mem_scheduler_group": "Metrics contributing to tma_mem_scheduler category",
"tma_memory_bound_group": "Metrics contributing to tma_memory_bound category",
"tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category",
"tma_mite_group": "Metrics contributing to tma_mite category",
- "tma_nuke_group": "Metrics contributing to tma_nuke category",
"tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category",
"tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category",
"tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/other.json b/tools/perf/pmu-events/arch/x86/alderlake/other.json
index 5250a17d9cae..f95e093f8fcf 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ASSISTS.HARDWARE",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.HARDWARE",
"SampleAfterValue": "100003",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "ASSISTS.PAGE_FAULT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.PAGE_FAULT",
"SampleAfterValue": "1000003",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "CORE_POWER.LICENSE_1",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LICENSE_1",
"SampleAfterValue": "200003",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "CORE_POWER.LICENSE_2",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LICENSE_2",
"SampleAfterValue": "200003",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "CORE_POWER.LICENSE_3",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LICENSE_3",
"SampleAfterValue": "200003",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xe4",
"EventName": "LBR_INSERTS.ANY",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.COREWB_M.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -61,6 +68,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -91,6 +101,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -101,6 +112,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -111,6 +123,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -121,6 +134,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -131,6 +145,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa5",
"EventName": "RS.EMPTY",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
@@ -140,6 +155,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xa5",
@@ -151,7 +167,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EdgeDetect": "1",
@@ -164,6 +190,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xa5",
"EventName": "RS_EMPTY.CYCLES",
@@ -172,7 +199,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles the uncore cannot take further requests",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x2d",
"EventName": "XQ.FULL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
index df6032e816d4..b7656f77dee9 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.DIV_ACTIVE",
@@ -21,6 +23,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.FPDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -31,6 +34,7 @@
},
{
"BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.IDIV_ACTIVE",
@@ -40,6 +44,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.IDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -50,6 +55,7 @@
},
{
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.ANY",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
@@ -59,6 +65,7 @@
},
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -68,6 +75,7 @@
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -77,6 +85,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_CALL",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.CALL",
@@ -87,6 +96,7 @@
},
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -96,6 +106,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -106,6 +117,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -116,6 +128,7 @@
},
{
"BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -125,6 +138,7 @@
},
{
"BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -135,6 +149,7 @@
},
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -144,6 +159,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -154,6 +170,7 @@
},
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -163,6 +180,7 @@
},
{
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -173,6 +191,7 @@
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -182,6 +201,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
@@ -192,6 +212,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.JCC",
@@ -202,6 +223,7 @@
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -211,6 +233,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -221,6 +244,7 @@
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -230,6 +254,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -240,6 +265,7 @@
},
{
"BriefDescription": "Counts the number of near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -249,6 +275,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -259,6 +286,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
@@ -269,6 +297,7 @@
},
{
"BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
@@ -278,6 +307,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_RETURN",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.RETURN",
@@ -288,6 +318,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND_TAKEN",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
@@ -298,6 +329,7 @@
},
{
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -307,6 +339,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -316,6 +349,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -325,6 +359,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -335,6 +370,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -345,6 +381,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -354,6 +391,7 @@
},
{
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -364,6 +402,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -373,6 +412,7 @@
},
{
"BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -383,6 +423,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -392,6 +433,7 @@
},
{
"BriefDescription": "Mispredicted indirect CALL retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -402,6 +444,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT_CALL",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
@@ -412,6 +455,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.JCC",
@@ -422,6 +466,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -431,6 +476,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -441,6 +487,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
@@ -451,6 +498,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -461,6 +509,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
@@ -470,6 +519,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND_TAKEN",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
@@ -480,6 +530,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C01",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
@@ -489,6 +540,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C02",
"PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
@@ -498,6 +550,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C0_WAIT",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
@@ -507,6 +560,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
"SampleAfterValue": "2000003",
@@ -515,6 +569,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
@@ -523,6 +578,7 @@
},
{
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -532,6 +588,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
@@ -541,6 +598,7 @@
},
{
"BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.PAUSE",
"SampleAfterValue": "2000003",
@@ -549,6 +607,7 @@
},
{
"BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xec",
@@ -559,6 +618,7 @@
},
{
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -568,6 +628,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
"SampleAfterValue": "2000003",
@@ -576,6 +637,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -584,6 +646,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
@@ -593,6 +656,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
@@ -602,6 +666,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
"SampleAfterValue": "2000003",
@@ -610,6 +675,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -618,6 +684,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
@@ -626,6 +693,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -634,6 +702,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -643,6 +712,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -652,6 +722,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "16",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -661,6 +732,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -670,6 +742,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -679,6 +752,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -688,6 +762,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -696,7 +771,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles total of 2 or 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_3_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -706,6 +791,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -715,6 +801,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -724,6 +811,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
@@ -733,6 +821,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
@@ -743,6 +832,7 @@
},
{
"BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
@@ -752,6 +842,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -761,6 +852,7 @@
},
{
"BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
@@ -770,6 +862,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -779,6 +872,7 @@
},
{
"BriefDescription": "Counts the total number of instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -788,6 +882,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -797,6 +892,7 @@
},
{
"BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -806,6 +902,7 @@
},
{
"BriefDescription": "Retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBS": "1",
@@ -816,6 +913,7 @@
},
{
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
@@ -825,6 +923,7 @@
},
{
"BriefDescription": "Iterations of Repeat string retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.REP_ITERATION",
"PEBS": "1",
@@ -835,6 +934,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xad",
@@ -846,6 +946,7 @@
},
{
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
@@ -855,6 +956,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -864,6 +966,7 @@
},
{
"BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"MSRIndex": "0x3F7",
@@ -874,6 +977,7 @@
},
{
"BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.UOP_DROPPING",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
@@ -883,6 +987,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.128BIT",
"SampleAfterValue": "1000003",
@@ -891,6 +996,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.256BIT",
"SampleAfterValue": "1000003",
@@ -899,6 +1005,7 @@
},
{
"BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.ADD_128",
"PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
@@ -908,6 +1015,7 @@
},
{
"BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.ADD_256",
"PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
@@ -917,6 +1025,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.MUL_256",
"SampleAfterValue": "1000003",
@@ -925,6 +1034,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.SHUFFLES",
"SampleAfterValue": "1000003",
@@ -933,6 +1043,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.VNNI_128",
"SampleAfterValue": "1000003",
@@ -941,6 +1052,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.VNNI_256",
"SampleAfterValue": "1000003",
@@ -949,6 +1061,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event LD_BLOCKS.ADDRESS_ALIAS",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.4K_ALIAS",
@@ -959,6 +1072,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
"PEBS": "1",
@@ -968,6 +1082,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
@@ -977,6 +1092,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "1",
@@ -986,6 +1102,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -995,6 +1112,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -1004,6 +1122,7 @@
},
{
"BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -1013,6 +1132,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -1023,6 +1143,7 @@
},
{
"BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
@@ -1033,6 +1154,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -1042,6 +1164,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -1053,6 +1176,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"SampleAfterValue": "20003",
@@ -1061,6 +1185,7 @@
},
{
"BriefDescription": "Counts the number of machines clears due to memory renaming.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MRN_NUKE",
"SampleAfterValue": "1000003",
@@ -1069,6 +1194,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"SampleAfterValue": "20003",
@@ -1077,6 +1203,7 @@
},
{
"BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -1085,6 +1212,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20003",
@@ -1093,6 +1221,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -1102,6 +1231,7 @@
},
{
"BriefDescription": "LFENCE instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe0",
"EventName": "MISC2_RETIRED.LFENCE",
"PublicDescription": "number of LFENCE retired instructions",
@@ -1111,6 +1241,7 @@
},
{
"BriefDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PEBS": "1",
@@ -1121,6 +1252,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
@@ -1130,6 +1262,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -1139,6 +1272,7 @@
},
{
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"SampleAfterValue": "100003",
@@ -1147,6 +1281,7 @@
},
{
"BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x75",
"EventName": "SERIALIZATION.NON_C01_MS_SCB",
"PublicDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires. The most commonly executed instruction with an MS scoreboard is PAUSE.",
@@ -1156,6 +1291,7 @@
},
{
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "Number of slots in TMA method where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
@@ -1165,6 +1301,7 @@
},
{
"BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "Counter": "0",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BAD_SPEC_SLOTS",
"PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
@@ -1174,6 +1311,7 @@
},
{
"BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "Counter": "0",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
"PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
@@ -1183,6 +1321,7 @@
},
{
"BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
"SampleAfterValue": "10000003",
@@ -1191,6 +1330,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -1199,6 +1339,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -1208,6 +1349,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
@@ -1216,6 +1358,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
"SampleAfterValue": "1000003",
@@ -1224,6 +1367,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
"SampleAfterValue": "1000003",
@@ -1232,6 +1376,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to branch mispredicts.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
"SampleAfterValue": "1000003",
@@ -1240,6 +1385,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
"SampleAfterValue": "1000003",
@@ -1248,6 +1394,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to backend stalls.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003",
@@ -1255,6 +1402,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
"SampleAfterValue": "1000003",
@@ -1263,6 +1411,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -1271,6 +1420,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -1279,6 +1429,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
"SampleAfterValue": "1000003",
@@ -1287,6 +1438,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
"SampleAfterValue": "1000003",
@@ -1295,6 +1447,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
"SampleAfterValue": "1000003",
@@ -1303,6 +1456,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003",
@@ -1310,6 +1464,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -1319,6 +1474,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
@@ -1328,6 +1484,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
"SampleAfterValue": "1000003",
@@ -1336,6 +1493,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stalls.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
"SampleAfterValue": "1000003",
@@ -1344,6 +1502,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
"SampleAfterValue": "1000003",
@@ -1352,6 +1511,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to a latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
"SampleAfterValue": "1000003",
@@ -1360,6 +1520,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
@@ -1369,6 +1530,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
"SampleAfterValue": "1000003",
@@ -1377,6 +1539,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to wrong predecodes.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
"SampleAfterValue": "1000003",
@@ -1385,6 +1548,7 @@
},
{
"BriefDescription": "Counts the total number of consumed retirement slots.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "TOPDOWN_RETIRING.ALL",
"PEBS": "1",
@@ -1393,6 +1557,7 @@
},
{
"BriefDescription": "UOPS_DECODED.DEC0_UOPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UOPS_DECODED.DEC0_UOPS",
"SampleAfterValue": "1000003",
@@ -1401,6 +1566,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_0",
"PublicDescription": "Number of uops dispatch to execution port 0.",
@@ -1410,6 +1576,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_1",
"PublicDescription": "Number of uops dispatch to execution port 1.",
@@ -1419,6 +1586,7 @@
},
{
"BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_2_3_10",
"PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
@@ -1428,6 +1596,7 @@
},
{
"BriefDescription": "Uops executed on ports 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
@@ -1437,6 +1606,7 @@
},
{
"BriefDescription": "Uops executed on ports 5 and 11",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_5_11",
"PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
@@ -1446,6 +1616,7 @@
},
{
"BriefDescription": "Uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_6",
"PublicDescription": "Number of uops dispatch to execution port 6.",
@@ -1455,6 +1626,7 @@
},
{
"BriefDescription": "Uops executed on ports 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
@@ -1464,6 +1636,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -1474,6 +1647,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -1484,6 +1658,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -1494,6 +1669,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -1504,6 +1680,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
@@ -1514,6 +1691,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
@@ -1524,6 +1702,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
@@ -1534,6 +1713,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
@@ -1544,6 +1724,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.STALLS",
@@ -1555,6 +1736,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UOPS_EXECUTED.STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb1",
@@ -1566,6 +1748,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
@@ -1574,6 +1757,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -1582,7 +1766,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xae",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -1591,7 +1785,18 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "UOPS_ISSUED.CYCLES",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the total number of uops retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -1600,6 +1805,7 @@
},
{
"BriefDescription": "Cycles with retired uop(s).",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.CYCLES",
@@ -1610,6 +1816,7 @@
},
{
"BriefDescription": "Retired uops except the last uop of each instruction.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.HEAVY",
"PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
@@ -1619,6 +1826,7 @@
},
{
"BriefDescription": "Counts the number of integer divide uops retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
"PEBS": "1",
@@ -1628,6 +1836,7 @@
},
{
"BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"PEBS": "1",
@@ -1638,6 +1847,7 @@
},
{
"BriefDescription": "UOPS_RETIRED.MS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"MSRIndex": "0x3F7",
@@ -1648,6 +1858,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "Counts the retirement slots used each cycle.",
@@ -1657,6 +1868,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.STALLS",
@@ -1668,6 +1880,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UOPS_RETIRED.STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xc2",
@@ -1679,6 +1892,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
index 8bf020a9dfa8..7c0779c74154 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of requests allocated in Coherency Tracker.",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -9,56 +10,69 @@
},
{
"BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_DAT_REQUESTS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x85",
"EventName": "UNC_ARB_IFA_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_OCCUPANCY.RD]",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_REQUESTS.RD]",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -67,14 +81,17 @@
},
{
"BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_OCCUPANCY.DRD]",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -83,8 +100,10 @@
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_REQUEST.DRD]",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json
index 163d7e7755c4..bcf275cd592a 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts every 64B read request entering the Memory Controller 0 to DRAM (sum of all channels).",
+ "Counter": "0",
"EventCode": "0xff",
"EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Counts every 64B write request entering the Memory Controller 0 to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "Counter": "1",
"EventCode": "0xff",
"EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Counts every 64B read request entering the Memory Controller 1 to DRAM (sum of all channels).",
+ "Counter": "3",
"EventCode": "0xff",
"EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Counts every 64B write request entering the Memory Controller 1 to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "ACT command for a read request sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x24",
"EventName": "UNC_M_ACT_COUNT_RD",
"PerPkg": "1",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "ACT command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x26",
"EventName": "UNC_M_ACT_COUNT_TOTAL",
"PerPkg": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "ACT command for a write request sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x25",
"EventName": "UNC_M_ACT_COUNT_WR",
"PerPkg": "1",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Read CAS command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x22",
"EventName": "UNC_M_CAS_COUNT_RD",
"PerPkg": "1",
@@ -63,6 +71,7 @@
},
{
"BriefDescription": "Write CAS command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x23",
"EventName": "UNC_M_CAS_COUNT_WR",
"PerPkg": "1",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Number of clocks",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x01",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "incoming read request page status is Page Empty",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1D",
"EventName": "UNC_M_DRAM_PAGE_EMPTY_RD",
"PerPkg": "1",
@@ -84,6 +95,7 @@
},
{
"BriefDescription": "incoming write request page status is Page Empty",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x20",
"EventName": "UNC_M_DRAM_PAGE_EMPTY_WR",
"PerPkg": "1",
@@ -91,6 +103,7 @@
},
{
"BriefDescription": "incoming read request page status is Page Hit",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1C",
"EventName": "UNC_M_DRAM_PAGE_HIT_RD",
"PerPkg": "1",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "incoming write request page status is Page Hit",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1F",
"EventName": "UNC_M_DRAM_PAGE_HIT_WR",
"PerPkg": "1",
@@ -105,6 +119,7 @@
},
{
"BriefDescription": "incoming read request page status is Page Miss",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1E",
"EventName": "UNC_M_DRAM_PAGE_MISS_RD",
"PerPkg": "1",
@@ -112,6 +127,7 @@
},
{
"BriefDescription": "incoming write request page status is Page Miss",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x21",
"EventName": "UNC_M_DRAM_PAGE_MISS_WR",
"PerPkg": "1",
@@ -119,6 +135,7 @@
},
{
"BriefDescription": "Any Rank at Hot state",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x19",
"EventName": "UNC_M_DRAM_THERMAL_HOT",
"PerPkg": "1",
@@ -126,6 +143,7 @@
},
{
"BriefDescription": "Any Rank at Warm state",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1A",
"EventName": "UNC_M_DRAM_THERMAL_WARM",
"PerPkg": "1",
@@ -133,6 +151,7 @@
},
{
"BriefDescription": "Incoming read prefetch request from IA.",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x0A",
"EventName": "UNC_M_PREFETCH_RD",
"PerPkg": "1",
@@ -140,6 +159,7 @@
},
{
"BriefDescription": "PRE command sent to DRAM due to page table idle timer expiration",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x28",
"EventName": "UNC_M_PRE_COUNT_IDLE",
"PerPkg": "1",
@@ -147,6 +167,7 @@
},
{
"BriefDescription": "PRE command sent to DRAM for a read/write request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x27",
"EventName": "UNC_M_PRE_COUNT_PAGE_MISS",
"PerPkg": "1",
@@ -154,6 +175,7 @@
},
{
"BriefDescription": "Incoming VC0 read request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x02",
"EventName": "UNC_M_VC0_REQUESTS_RD",
"PerPkg": "1",
@@ -161,6 +183,7 @@
},
{
"BriefDescription": "Incoming VC0 write request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x03",
"EventName": "UNC_M_VC0_REQUESTS_WR",
"PerPkg": "1",
@@ -168,6 +191,7 @@
},
{
"BriefDescription": "Incoming VC1 read request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x04",
"EventName": "UNC_M_VC1_REQUESTS_RD",
"PerPkg": "1",
@@ -175,6 +199,7 @@
},
{
"BriefDescription": "Incoming VC1 write request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x05",
"EventName": "UNC_M_VC1_REQUESTS_WR",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json
index 2af92e43b28a..1ac5b5ef8094 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json
index 3827d292da80..e0d8f3070778 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -20,6 +22,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -29,6 +32,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -38,6 +42,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -47,6 +52,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -56,6 +62,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -65,6 +72,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
@@ -74,6 +82,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -83,6 +92,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -93,6 +103,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -102,6 +113,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -111,6 +123,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -120,6 +133,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -129,6 +143,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -138,6 +153,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
@@ -147,6 +163,7 @@
},
{
"BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
"SampleAfterValue": "1000003",
@@ -155,6 +172,7 @@
},
{
"BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.PDE_CACHE_MISS",
"SampleAfterValue": "2000003",
@@ -163,6 +181,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
@@ -172,6 +191,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -182,6 +202,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -191,6 +212,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -200,6 +222,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -209,6 +232,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -218,6 +242,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
@@ -227,6 +252,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.DTLB_MISS_AT_RET",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
index a35edf7d86a9..447596f924ab 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
@@ -85,39 +85,28 @@
"ScaleUnit": "1SMI#"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.",
- "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
- "MetricName": "tma_alloc_restriction",
- "MetricThreshold": "tma_alloc_restriction > 0.1",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions",
+ "MetricExpr": "tma_core_bound",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_allocation_restriction",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "TOPDOWN_BE_BOUND.ALL / tma_info_core_slots",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound. The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
- "DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "tma_backend_bound",
- "MetricGroup": "Default;TopdownL1;tma_L1_group",
- "MetricName": "tma_backend_bound_aux",
- "MetricThreshold": "tma_backend_bound_aux > 0.2",
- "MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "(tma_info_core_slots - (TOPDOWN_FE_BOUND.ALL + TOPDOWN_BE_BOUND.ALL + TOPDOWN_RETIRING.ALL)) / tma_info_core_slots",
+ "MetricExpr": "(5 * CPU_CLK_UNHALTED.CORE - (TOPDOWN_FE_BOUND.ALL + TOPDOWN_BE_BOUND.ALL + TOPDOWN_RETIRING.ALL)) / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
@@ -126,300 +115,351 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of uops that are not from the microsequencer.",
- "MetricExpr": "(TOPDOWN_RETIRING.ALL - UOPS_RETIRED.MS) / tma_info_core_slots",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
- "MetricName": "tma_base",
- "MetricThreshold": "tma_base > 0.6",
- "MetricgroupNoGroup": "TopdownL2",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
- "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "MetricThreshold": "tma_branch_detect > 0.05",
- "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.",
- "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
- "MetricThreshold": "tma_branch_mispredicts > 0.05",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
- "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
- "MetricThreshold": "tma_branch_resteer > 0.05",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
- "MetricExpr": "TOPDOWN_FE_BOUND.CISC / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "MetricExpr": "TOPDOWN_FE_BOUND.CISC / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.05",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of cycles due to backend bound stalls that are core execution bound and not attributed to outstanding demand load or store stalls.",
- "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
- "MetricThreshold": "tma_core_bound > 0.1",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
- "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
- "MetricThreshold": "tma_decode > 0.05",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to memory disambiguation.",
- "MetricExpr": "tma_nuke * (MACHINE_CLEARS.DISAMBIGUATION / MACHINE_CLEARS.SLOW)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_disambiguation",
- "MetricThreshold": "tma_disambiguation > 0.02",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_BOUND_STALLS.LOAD",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "Default;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.",
- "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
- "MetricName": "tma_fast_nuke",
- "MetricThreshold": "tma_fast_nuke > 0.05",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
- "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
- "MetricName": "tma_fetch_bandwidth",
- "MetricThreshold": "tma_fetch_bandwidth > 0.1",
+ "MetricName": "tma_ifetch_bandwidth",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
- "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
- "MetricName": "tma_fetch_latency",
- "MetricThreshold": "tma_fetch_latency > 0.15",
+ "MetricName": "tma_ifetch_latency",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to FP assists.",
- "MetricExpr": "tma_nuke * (MACHINE_CLEARS.FP_ASSIST / MACHINE_CLEARS.SLOW)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_fp_assist",
- "MetricThreshold": "tma_fp_assist > 0.02",
- "ScaleUnit": "100%"
+ "BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
+ "MetricExpr": "100 * (LD_HEAD.DTLB_MISS_AT_RET + LD_HEAD.PGWALK_AT_RET) / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles"
},
{
- "BriefDescription": "Counts the number of floating point divide operations per uop.",
- "MetricExpr": "UOPS_RETIRED.FPDIV / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
- "MetricName": "tma_fpdiv_uops",
- "MetricThreshold": "tma_fpdiv_uops > 0.2",
- "ScaleUnit": "100%"
+ "BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Ifetch",
+ "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
- "DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "TOPDOWN_FE_BOUND.ALL / tma_info_core_slots",
- "MetricGroup": "Default;TopdownL1;tma_L1_group",
- "MetricName": "tma_frontend_bound",
- "MetricThreshold": "tma_frontend_bound > 0.2",
- "MetricgroupNoGroup": "TopdownL1;Default",
- "ScaleUnit": "100%"
+ "BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.LOAD / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
- "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
- "MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05",
- "ScaleUnit": "100%"
+ "BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
+ "MetricExpr": "100 * LD_HEAD.ANY_AT_RET / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Mem_Exec",
+ "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_inst_mix_ipbranch"
+ },
+ {
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
+ "MetricName": "tma_info_br_inst_mix_ipcall"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricName": "tma_info_br_inst_mix_ipfarbranch"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
+ "MetricExpr": "INST_RETIRED.ANY / (BR_MISP_RETIRED.COND - BR_MISP_RETIRED.COND_TAKEN)",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_ntaken"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_taken"
+ },
+ {
+ "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_indirect"
+ },
+ {
+ "BriefDescription": "Instructions per retired return Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_ret"
+ },
+ {
+ "BriefDescription": "Instructions per retired Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_inst_mix_ipmispredict"
+ },
+ {
+ "BriefDescription": "Ratio of all branches which mispredict",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_ratio"
+ },
+ {
+ "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_to_unknown_branch_ratio"
},
{
- "BriefDescription": "",
- "MetricExpr": "CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_core_clks"
+ "BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.LD_BUF / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles"
},
{
- "BriefDescription": "",
- "MetricExpr": "CPU_CLK_UNHALTED.CORE_P",
- "MetricName": "tma_info_core_clks_p"
+ "BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.RSV / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.ST_BUF / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles"
},
{
"BriefDescription": "Cycles Per Instruction",
- "MetricExpr": "tma_info_core_clks / INST_RETIRED.ANY",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE / INST_RETIRED.ANY",
"MetricName": "tma_info_core_cpi"
},
{
"BriefDescription": "Instructions Per Cycle",
- "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.CORE",
"MetricName": "tma_info_core_ipc"
},
{
- "BriefDescription": "",
- "MetricExpr": "5 * tma_info_core_clks",
- "MetricName": "tma_info_core_slots"
- },
- {
"BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.ALL / INST_RETIRED.ANY",
"MetricName": "tma_info_core_upi"
},
{
- "BriefDescription": "Percent of instruction miss cost that hit in DRAM",
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_L2_HIT / MEM_BOUND_STALLS.IFETCH",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_LLC_HIT / MEM_BOUND_STALLS.IFETCH",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3",
"MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_DRAM_HIT / MEM_BOUND_STALLS.IFETCH",
- "MetricName": "tma_info_frontend_inst_miss_cost_dramhit_percent"
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss"
},
{
- "BriefDescription": "Percent of instruction miss cost that hit in the L2",
- "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_L2_HIT / MEM_BOUND_STALLS.IFETCH",
- "MetricName": "tma_info_frontend_inst_miss_cost_l2hit_percent"
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit"
},
{
- "BriefDescription": "Percent of instruction miss cost that hit in the L3",
- "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_LLC_HIT / MEM_BOUND_STALLS.IFETCH",
- "MetricName": "tma_info_frontend_inst_miss_cost_l3hit_percent"
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit"
},
{
- "BriefDescription": "Ratio of all branches which mispredict",
- "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricName": "tma_info_inst_mix_branch_mispredict_ratio"
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss"
},
{
- "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
- "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
- "MetricName": "tma_info_inst_mix_branch_mispredict_to_unknown_branch_ratio"
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a pipeline block",
+ "MetricExpr": "100 * LD_HEAD.L1_BOUND_AT_RET / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_l1_bound"
},
{
- "BriefDescription": "Percentage of all uops which are FPDiv uops",
- "MetricExpr": "100 * UOPS_RETIRED.FPDIV / UOPS_RETIRED.ALL",
- "MetricName": "tma_info_inst_mix_fpdiv_uop_ratio"
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement",
+ "MetricExpr": "100 * (LD_HEAD.L1_BOUND_AT_RET + MEM_BOUND_STALLS.LOAD) / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_load_bound"
},
{
- "BriefDescription": "Percentage of all uops which are IDiv uops",
- "MetricExpr": "100 * UOPS_RETIRED.IDIV / UOPS_RETIRED.ALL",
- "MetricName": "tma_info_inst_mix_idiv_uop_ratio"
+ "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full",
+ "MetricExpr": "100 * (MEM_SCHEDULER_BLOCK.ST_BUF / MEM_SCHEDULER_BLOCK.ALL) * tma_mem_scheduler",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_store_bound"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricName": "tma_info_inst_mix_ipbranch"
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to memory disambiguation",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.DISAMBIGUATION / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_disamb_pki"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
- "MetricName": "tma_info_inst_mix_ipcall"
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to floating point assists",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.FP_ASSIST / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_fp_assist_pki"
},
{
- "BriefDescription": "Instructions per Far Branch",
- "MetricExpr": "INST_RETIRED.ANY / (BR_INST_RETIRED.FAR_BRANCH / 2)",
- "MetricName": "tma_info_inst_mix_ipfarbranch"
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to memory ordering",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.MEMORY_ORDERING / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_monuke_pki"
},
{
- "BriefDescription": "Instructions per Load",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_inst_mix_ipload"
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to memory renaming",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.MRN_NUKE / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_mrn_pki"
},
{
- "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
- "MetricExpr": "INST_RETIRED.ANY / (BR_MISP_RETIRED.COND - BR_MISP_RETIRED.COND_TAKEN)",
- "MetricName": "tma_info_inst_mix_ipmisp_cond_ntaken"
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to page faults",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.PAGE_FAULT / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_page_fault_pki"
},
{
- "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
- "MetricName": "tma_info_inst_mix_ipmisp_cond_taken"
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to self-modifying code",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.SMC / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_smc_pki"
},
{
- "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
- "MetricName": "tma_info_inst_mix_ipmisp_indirect"
+ "BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
+ "MetricExpr": "100 * LD_BLOCKS.4K_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing"
},
{
- "BriefDescription": "Instructions per retired return Branch Misprediction",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN",
- "MetricName": "tma_info_inst_mix_ipmisp_ret"
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk"
},
{
- "BriefDescription": "Instructions per retired Branch Misprediction",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricName": "tma_info_inst_mix_ipmispredict"
+ "BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
+ "MetricExpr": "100 * LD_HEAD.L1_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss"
},
{
- "BriefDescription": "Instructions per Store",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "MetricName": "tma_info_inst_mix_ipstore"
+ "BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
+ "MetricExpr": "100 * LD_HEAD.OTHER_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks"
},
{
- "BriefDescription": "Percentage of all uops which are ucode ops",
- "MetricExpr": "100 * UOPS_RETIRED.MS / UOPS_RETIRED.ALL",
- "MetricName": "tma_info_inst_mix_microcode_uop_ratio"
+ "BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
+ "MetricExpr": "100 * LD_HEAD.PGWALK_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk"
},
{
- "BriefDescription": "Percentage of all uops which are x87 uops",
- "MetricExpr": "100 * UOPS_RETIRED.X87 / UOPS_RETIRED.ALL",
- "MetricName": "tma_info_inst_mix_x87_uop_ratio"
+ "BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
+ "MetricExpr": "100 * LD_HEAD.DTLB_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit"
},
{
- "BriefDescription": "Percentage of total non-speculative loads with a address aliasing block",
- "MetricExpr": "100 * LD_BLOCKS.4K_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_l1_bound_address_alias_blocks"
+ "BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
+ "MetricExpr": "100 * LD_HEAD.ST_ADDR_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding"
},
{
- "BriefDescription": "Percentage of total non-speculative loads that are splits",
- "MetricExpr": "100 * MEM_UOPS_RETIRED.SPLIT_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_l1_bound_load_splits"
+ "BriefDescription": "Instructions per Load",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_ipload"
},
{
- "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
- "MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_l1_bound_store_fwd_blocks"
+ "BriefDescription": "Instructions per Store",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricName": "tma_info_mem_mix_ipstore"
},
{
- "BriefDescription": "Cycle cost per DRAM hit",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
- "MetricName": "tma_info_memory_cycles_per_demand_load_dram_hit"
+ "BriefDescription": "Percentage of total non-speculative loads that perform one or more locks",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_load_locks_ratio"
},
{
- "BriefDescription": "Cycle cost per L2 hit",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "MetricName": "tma_info_memory_cycles_per_demand_load_l2_hit"
+ "BriefDescription": "Percentage of total non-speculative loads that are splits",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.SPLIT_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_load_splits_ratio"
},
{
- "BriefDescription": "Cycle cost per LLC hit",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "MetricName": "tma_info_memory_cycles_per_demand_load_l3_hit"
+ "BriefDescription": "Ratio of mem load uops to all uops",
+ "MetricExpr": "1e3 * MEM_UOPS_RETIRED.ALL_LOADS / UOPS_RETIRED.ALL",
+ "MetricName": "tma_info_mem_mix_memload_ratio"
},
{
- "BriefDescription": "load ops retired per 1000 instruction",
- "MetricExpr": "1e3 * MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
- "MetricName": "tma_info_memory_memloadpki"
+ "BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
+ "MetricExpr": "100 * SERIALIZATION.C01_MS_SCB / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricName": "tma_info_serialization _%_tpause_cycles"
},
{
"BriefDescription": "Average CPU Utilization",
@@ -428,194 +468,122 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "cpu@CPU_CLK_UNHALTED.CORE@k / CPU_CLK_UNHALTED.CORE",
+ "MetricExpr": "cpu@CPU_CLK_UNHALTED.CORE_P@k / CPU_CLK_UNHALTED.CORE",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_kernel_utilization"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "tma_info_core_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "tma_info_system_turbo_utilization"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
- "MetricExpr": "TOPDOWN_FE_BOUND.ITLB / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
- "MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05",
- "ScaleUnit": "100%"
+ "BriefDescription": "Percentage of all uops which are FPDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.FPDIV / UOPS_RETIRED.ALL",
+ "MetricName": "tma_info_uop_mix_fpdiv_uop_ratio"
},
{
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.",
- "MetricExpr": "LD_HEAD.L1_BOUND_AT_RET / tma_info_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1",
- "ScaleUnit": "100%"
+ "BriefDescription": "Percentage of all uops which are IDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.IDIV / UOPS_RETIRED.ALL",
+ "MetricName": "tma_info_uop_mix_idiv_uop_ratio"
},
{
- "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_BOUND_STALLS.LOAD",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.1",
- "ScaleUnit": "100%"
+ "BriefDescription": "Percentage of all uops which are microcode ops",
+ "MetricExpr": "100 * UOPS_RETIRED.MS / UOPS_RETIRED.ALL",
+ "MetricName": "tma_info_uop_mix_microcode_uop_ratio"
},
{
- "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.1",
- "ScaleUnit": "100%"
+ "BriefDescription": "Percentage of all uops which are x87 uops",
+ "MetricExpr": "100 * UOPS_RETIRED.X87 / UOPS_RETIRED.ALL",
+ "MetricName": "tma_info_uop_mix_x87_uop_ratio"
},
{
- "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to load buffer full",
- "MetricExpr": "tma_mem_scheduler * MEM_SCHEDULER_BLOCK.LD_BUF / MEM_SCHEDULER_BLOCK.ALL",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
- "MetricName": "tma_ld_buffer",
- "MetricThreshold": "tma_ld_buffer > 0.05",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ITLB / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
- "MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / tma_info_core_slots",
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
- "MetricThreshold": "tma_machine_clears > 0.05",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
- "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops",
+ "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
- "MetricThreshold": "tma_mem_scheduler > 0.1",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to stores or loads.",
- "MetricExpr": "min(tma_backend_bound, LD_HEAD.ANY_AT_RET / tma_info_core_clks + tma_store_bound)",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
- "MetricName": "tma_memory_bound",
- "MetricThreshold": "tma_memory_bound > 0.2",
- "MetricgroupNoGroup": "TopdownL2",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to memory ordering.",
- "MetricExpr": "tma_nuke * (MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.SLOW)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_memory_ordering",
- "MetricThreshold": "tma_memory_ordering > 0.02",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS)",
- "MetricExpr": "UOPS_RETIRED.MS / tma_info_core_slots",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
- "MetricName": "tma_ms_uops",
- "MetricThreshold": "tma_ms_uops > 0.05",
- "MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
- "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops",
+ "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
- "MetricThreshold": "tma_non_mem_scheduler > 0.1",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear (slow nuke).",
- "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
- "MetricThreshold": "tma_nuke > 0.05",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
- "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
- "MetricThreshold": "tma_other_fb > 0.05",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.",
- "MetricExpr": "LD_HEAD.OTHER_AT_RET / tma_info_core_clks",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
- "MetricName": "tma_other_l1",
- "MetricThreshold": "tma_other_l1 > 0.05",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hits in the L2, LLC, DRAM or MMIO (Non-DRAM) but could not be correctly attributed or cycles in which the load miss is waiting on a request buffer.",
- "MetricExpr": "max(0, tma_memory_bound - (tma_store_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound))",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_other_load_store",
- "MetricThreshold": "tma_other_load_store > 0.1",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of uops retired excluding ms and fp div uops.",
- "MetricExpr": "(TOPDOWN_RETIRING.ALL - UOPS_RETIRED.MS - UOPS_RETIRED.FPDIV) / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
- "MetricName": "tma_other_ret",
- "MetricThreshold": "tma_other_ret > 0.3",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to page faults.",
- "MetricExpr": "tma_nuke * (MACHINE_CLEARS.PAGE_FAULT / MACHINE_CLEARS.SLOW)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_page_fault",
- "MetricThreshold": "tma_page_fault > 0.02",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
- "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / tma_info_core_slots",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
- "MetricThreshold": "tma_predecode > 0.05",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
- "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
- "MetricThreshold": "tma_register > 0.1",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
- "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
- "MetricThreshold": "tma_reorder_buffer > 0.1",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
- "MetricExpr": "tma_backend_bound",
- "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_aux_group",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation",
+ "MetricExpr": "tma_backend_bound - tma_core_bound",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
- "MetricThreshold": "tma_resource_bound > 0.2",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count.",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that result in retirement slots.",
+ "BriefDescription": "Counts the number of issue slots that result in retirement slots",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "TOPDOWN_RETIRING.ALL / tma_info_core_slots",
+ "MetricExpr": "TOPDOWN_RETIRING.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.75",
@@ -623,67 +591,11 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to RSV full relative",
- "MetricExpr": "tma_mem_scheduler * MEM_SCHEDULER_BLOCK.RSV / MEM_SCHEDULER_BLOCK.ALL",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
- "MetricName": "tma_rsv",
- "MetricThreshold": "tma_rsv > 0.05",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
- "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / tma_info_core_slots",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
- "MetricThreshold": "tma_serialization > 0.1",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to SMC.",
- "MetricExpr": "tma_nuke * (MACHINE_CLEARS.SMC / MACHINE_CLEARS.SLOW)",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
- "MetricName": "tma_smc",
- "MetricThreshold": "tma_smc > 0.02",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to store buffer full",
- "MetricExpr": "tma_store_bound",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
- "MetricName": "tma_st_buffer",
- "MetricThreshold": "tma_st_buffer > 0.05",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.",
- "MetricExpr": "LD_HEAD.DTLB_MISS_AT_RET / tma_info_core_clks",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
- "MetricName": "tma_stlb_hit",
- "MetricThreshold": "tma_stlb_hit > 0.05",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a second level TLB miss requiring a page walk.",
- "MetricExpr": "LD_HEAD.PGWALK_AT_RET / tma_info_core_clks",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
- "MetricName": "tma_stlb_miss",
- "MetricThreshold": "tma_stlb_miss > 0.05",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full.",
- "MetricExpr": "tma_mem_scheduler * (MEM_SCHEDULER_BLOCK.ST_BUF / MEM_SCHEDULER_BLOCK.ALL)",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
- "MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.1",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
- "MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
- "MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.05",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/cache.json b/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
index 043445ae14a8..1500033ee19f 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
@@ -1,22 +1,25 @@
[
{
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
{
"BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x4f"
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD",
"SampleAfterValue": "200003",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
"SampleAfterValue": "200003",
@@ -63,6 +71,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
"SampleAfterValue": "200003",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the LLC or other core with HITE/F/M.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
@@ -78,6 +88,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
@@ -87,6 +98,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ALL",
"SampleAfterValue": "20003",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
"SampleAfterValue": "20003",
@@ -119,6 +134,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.RSV",
"SampleAfterValue": "20003",
@@ -126,6 +142,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
"SampleAfterValue": "20003",
@@ -133,6 +150,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -143,6 +161,7 @@
},
{
"BriefDescription": "Counts the number of store uops retired.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
@@ -165,6 +185,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
@@ -177,6 +198,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
@@ -189,6 +211,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
@@ -201,6 +224,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
@@ -213,6 +237,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
@@ -225,6 +250,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
@@ -237,6 +263,7 @@
},
{
"BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
@@ -248,7 +275,18 @@
"UMask": "0x5"
},
{
+ "BriefDescription": "Counts the number of load uops retired that performed one or more locks.",
+ "Counter": "0,1,2,3,4,5",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
"BriefDescription": "Counts the number of retired split load uops.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -258,6 +296,7 @@
},
{
"BriefDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled.",
+ "Counter": "0,1,2,3,4,5",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
@@ -268,6 +307,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -277,6 +317,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -286,6 +327,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -295,6 +337,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -304,6 +347,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -313,6 +357,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -322,6 +367,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json b/tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json
index 30e8ca3c1485..484d8b3167f0 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/frontend.json b/tools/perf/pmu-events/arch/x86/alderlaken/frontend.json
index 36898bab2bba..2a68f9969da0 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of requests to the instruction cache for one or more bytes of a cache line.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of instruction cache misses.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/memory.json b/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
index 863a3ba2b4b2..619488d42a4a 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.ANY_AT_RET",
"SampleAfterValue": "1000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_BOUND_AT_RET",
"SampleAfterValue": "1000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_MISS_AT_RET",
"SampleAfterValue": "1000003",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.OTHER_AT_RET",
"PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.PGWALK_AT_RET",
"SampleAfterValue": "1000003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.ST_ADDR_AT_RET",
"SampleAfterValue": "1000003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "20003",
@@ -51,6 +58,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS]",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -69,6 +78,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -78,6 +88,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS]",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/metricgroups.json b/tools/perf/pmu-events/arch/x86/alderlaken/metricgroups.json
index 7b2049cd2694..40984c23a6c9 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/metricgroups.json
@@ -1,26 +1,23 @@
{
+ "Flops": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Ifetch": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Load_Store_Miss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Mem_Exec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Summary": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"TopdownL1": "Metrics for top-down breakdown at level 1",
"TopdownL2": "Metrics for top-down breakdown at level 2",
"TopdownL3": "Metrics for top-down breakdown at level 3",
- "TopdownL4": "Metrics for top-down breakdown at level 4",
+ "load_store_bound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"tma_L1_group": "Metrics for top-down breakdown at level 1",
"tma_L2_group": "Metrics for top-down breakdown at level 2",
"tma_L3_group": "Metrics for top-down breakdown at level 3",
- "tma_L4_group": "Metrics for top-down breakdown at level 4",
- "tma_backend_bound_aux_group": "Metrics contributing to tma_backend_bound_aux category",
"tma_backend_bound_group": "Metrics contributing to tma_backend_bound category",
"tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
- "tma_base_group": "Metrics contributing to tma_base category",
- "tma_fetch_bandwidth_group": "Metrics contributing to tma_fetch_bandwidth category",
- "tma_fetch_latency_group": "Metrics contributing to tma_fetch_latency category",
+ "tma_core_bound_group": "Metrics contributing to tma_core_bound category",
"tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category",
- "tma_l1_bound_group": "Metrics contributing to tma_l1_bound category",
+ "tma_ifetch_bandwidth_group": "Metrics contributing to tma_ifetch_bandwidth category",
+ "tma_ifetch_latency_group": "Metrics contributing to tma_ifetch_latency category",
"tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
- "tma_mem_scheduler_group": "Metrics contributing to tma_mem_scheduler category",
- "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category",
- "tma_nuke_group": "Metrics contributing to tma_nuke category",
- "tma_resource_bound_group": "Metrics contributing to tma_resource_bound category",
- "tma_retiring_group": "Metrics contributing to tma_retiring category"
+ "tma_resource_bound_group": "Metrics contributing to tma_resource_bound category"
}
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/other.json b/tools/perf/pmu-events/arch/x86/alderlaken/other.json
index ccc892149dbe..54ddbe2b3b9b 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/other.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xe4",
"EventName": "LBR_INSERTS.ANY",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.COREWB_M.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -37,11 +41,20 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
"SampleAfterValue": "100003",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
index 846bcdafca6d..f05db45578ff 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_CALL",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.CALL",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.JCC",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -92,6 +103,7 @@
},
{
"BriefDescription": "Counts the number of near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -100,6 +112,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
@@ -109,6 +122,7 @@
},
{
"BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
@@ -117,6 +131,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_RETURN",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.RETURN",
@@ -126,6 +141,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND_TAKEN",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
@@ -135,6 +151,7 @@
},
{
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -143,6 +160,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -151,6 +169,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -159,6 +178,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -167,6 +187,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -175,6 +196,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT_CALL",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
@@ -184,6 +206,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.JCC",
@@ -193,6 +216,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -201,6 +225,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
@@ -210,6 +235,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
@@ -218,6 +244,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND_TAKEN",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
@@ -227,6 +254,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
"SampleAfterValue": "2000003",
@@ -234,6 +262,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
@@ -241,6 +270,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
"SampleAfterValue": "2000003",
@@ -248,6 +278,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
@@ -256,6 +287,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
"SampleAfterValue": "2000003",
@@ -263,6 +295,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
@@ -270,6 +303,7 @@
},
{
"BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
@@ -278,6 +312,7 @@
},
{
"BriefDescription": "Counts the total number of instructions retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -286,6 +321,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event LD_BLOCKS.ADDRESS_ALIAS",
+ "Counter": "0,1,2,3,4,5",
"Deprecated": "1",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.4K_ALIAS",
@@ -295,6 +331,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
"PEBS": "1",
@@ -303,6 +340,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "1",
@@ -311,6 +349,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"SampleAfterValue": "20003",
@@ -318,6 +357,7 @@
},
{
"BriefDescription": "Counts the number of machines clears due to memory renaming.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MRN_NUKE",
"SampleAfterValue": "1000003",
@@ -325,6 +365,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"SampleAfterValue": "20003",
@@ -332,6 +373,7 @@
},
{
"BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -339,6 +381,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20003",
@@ -346,6 +389,7 @@
},
{
"BriefDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PEBS": "1",
@@ -355,6 +399,7 @@
},
{
"BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x75",
"EventName": "SERIALIZATION.NON_C01_MS_SCB",
"PublicDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires. The most commonly executed instruction with an MS scoreboard is PAUSE.",
@@ -363,6 +408,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
@@ -370,6 +416,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
"SampleAfterValue": "1000003",
@@ -377,6 +424,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
"SampleAfterValue": "1000003",
@@ -384,6 +432,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to branch mispredicts.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
"SampleAfterValue": "1000003",
@@ -391,6 +440,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
"SampleAfterValue": "1000003",
@@ -398,12 +448,14 @@
},
{
"BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to backend stalls.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
"SampleAfterValue": "1000003",
@@ -411,6 +463,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -418,6 +471,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -425,6 +479,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
"SampleAfterValue": "1000003",
@@ -432,6 +487,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
"SampleAfterValue": "1000003",
@@ -439,6 +495,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
"SampleAfterValue": "1000003",
@@ -446,12 +503,14 @@
},
{
"BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -460,6 +519,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
@@ -468,6 +528,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
"SampleAfterValue": "1000003",
@@ -475,6 +536,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stalls.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
"SampleAfterValue": "1000003",
@@ -482,6 +544,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
"SampleAfterValue": "1000003",
@@ -489,6 +552,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to a latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
"SampleAfterValue": "1000003",
@@ -496,6 +560,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
@@ -504,6 +569,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
"SampleAfterValue": "1000003",
@@ -511,6 +577,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to wrong predecodes.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
"SampleAfterValue": "1000003",
@@ -518,13 +585,23 @@
},
{
"BriefDescription": "Counts the total number of consumed retirement slots.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "TOPDOWN_RETIRING.ALL",
"PEBS": "1",
"SampleAfterValue": "1000003"
},
{
+ "BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
+ "SampleAfterValue": "200003"
+ },
+ {
"BriefDescription": "Counts the total number of uops retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -532,6 +609,7 @@
},
{
"BriefDescription": "Counts the number of integer divide uops retired.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
"PEBS": "1",
@@ -540,6 +618,7 @@
},
{
"BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"PEBS": "1",
@@ -549,6 +628,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
index 8bf020a9dfa8..7c0779c74154 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of requests allocated in Coherency Tracker.",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -9,56 +10,69 @@
},
{
"BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_DAT_REQUESTS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x85",
"EventName": "UNC_ARB_IFA_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_OCCUPANCY.RD]",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_REQUESTS.RD]",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -67,14 +81,17 @@
},
{
"BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_OCCUPANCY.DRD]",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -83,8 +100,10 @@
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_REQUEST.DRD]",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json
index 163d7e7755c4..bcf275cd592a 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts every 64B read request entering the Memory Controller 0 to DRAM (sum of all channels).",
+ "Counter": "0",
"EventCode": "0xff",
"EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Counts every 64B write request entering the Memory Controller 0 to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "Counter": "1",
"EventCode": "0xff",
"EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Counts every 64B read request entering the Memory Controller 1 to DRAM (sum of all channels).",
+ "Counter": "3",
"EventCode": "0xff",
"EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Counts every 64B write request entering the Memory Controller 1 to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "ACT command for a read request sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x24",
"EventName": "UNC_M_ACT_COUNT_RD",
"PerPkg": "1",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "ACT command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x26",
"EventName": "UNC_M_ACT_COUNT_TOTAL",
"PerPkg": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "ACT command for a write request sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x25",
"EventName": "UNC_M_ACT_COUNT_WR",
"PerPkg": "1",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Read CAS command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x22",
"EventName": "UNC_M_CAS_COUNT_RD",
"PerPkg": "1",
@@ -63,6 +71,7 @@
},
{
"BriefDescription": "Write CAS command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x23",
"EventName": "UNC_M_CAS_COUNT_WR",
"PerPkg": "1",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Number of clocks",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x01",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "incoming read request page status is Page Empty",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1D",
"EventName": "UNC_M_DRAM_PAGE_EMPTY_RD",
"PerPkg": "1",
@@ -84,6 +95,7 @@
},
{
"BriefDescription": "incoming write request page status is Page Empty",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x20",
"EventName": "UNC_M_DRAM_PAGE_EMPTY_WR",
"PerPkg": "1",
@@ -91,6 +103,7 @@
},
{
"BriefDescription": "incoming read request page status is Page Hit",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1C",
"EventName": "UNC_M_DRAM_PAGE_HIT_RD",
"PerPkg": "1",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "incoming write request page status is Page Hit",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1F",
"EventName": "UNC_M_DRAM_PAGE_HIT_WR",
"PerPkg": "1",
@@ -105,6 +119,7 @@
},
{
"BriefDescription": "incoming read request page status is Page Miss",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1E",
"EventName": "UNC_M_DRAM_PAGE_MISS_RD",
"PerPkg": "1",
@@ -112,6 +127,7 @@
},
{
"BriefDescription": "incoming write request page status is Page Miss",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x21",
"EventName": "UNC_M_DRAM_PAGE_MISS_WR",
"PerPkg": "1",
@@ -119,6 +135,7 @@
},
{
"BriefDescription": "Any Rank at Hot state",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x19",
"EventName": "UNC_M_DRAM_THERMAL_HOT",
"PerPkg": "1",
@@ -126,6 +143,7 @@
},
{
"BriefDescription": "Any Rank at Warm state",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x1A",
"EventName": "UNC_M_DRAM_THERMAL_WARM",
"PerPkg": "1",
@@ -133,6 +151,7 @@
},
{
"BriefDescription": "Incoming read prefetch request from IA.",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x0A",
"EventName": "UNC_M_PREFETCH_RD",
"PerPkg": "1",
@@ -140,6 +159,7 @@
},
{
"BriefDescription": "PRE command sent to DRAM due to page table idle timer expiration",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x28",
"EventName": "UNC_M_PRE_COUNT_IDLE",
"PerPkg": "1",
@@ -147,6 +167,7 @@
},
{
"BriefDescription": "PRE command sent to DRAM for a read/write request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x27",
"EventName": "UNC_M_PRE_COUNT_PAGE_MISS",
"PerPkg": "1",
@@ -154,6 +175,7 @@
},
{
"BriefDescription": "Incoming VC0 read request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x02",
"EventName": "UNC_M_VC0_REQUESTS_RD",
"PerPkg": "1",
@@ -161,6 +183,7 @@
},
{
"BriefDescription": "Incoming VC0 write request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x03",
"EventName": "UNC_M_VC0_REQUESTS_WR",
"PerPkg": "1",
@@ -168,6 +191,7 @@
},
{
"BriefDescription": "Incoming VC1 read request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x04",
"EventName": "UNC_M_VC1_REQUESTS_RD",
"PerPkg": "1",
@@ -175,6 +199,7 @@
},
{
"BriefDescription": "Incoming VC1 write request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x05",
"EventName": "UNC_M_VC1_REQUESTS_WR",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json
index 2af92e43b28a..1ac5b5ef8094 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json b/tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json
index 67fd640f790e..ad2b1349bab4 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
"SampleAfterValue": "1000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.PDE_CACHE_MISS",
"SampleAfterValue": "2000003",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.DTLB_MISS_AT_RET",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/cache.json b/tools/perf/pmu-events/arch/x86/bonnell/cache.json
index 1ca95a70d48a..86582bb8aa39 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1 Data Cacheable reads and writes",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE.ALL_CACHE_REF",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "L1 Data reads and writes",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE.ALL_REF",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Modified cache lines evicted from the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE.EVICT",
"SampleAfterValue": "200000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1 Cacheable Data Reads",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE.LD",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1 Data line replacements",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE.REPL",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "Modified cache lines allocated in the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE.REPLM",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "L1 Cacheable Data Writes",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE.ST",
"SampleAfterValue": "2000000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "Cycles L2 address bus is in use.",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "L2_ADS.SELF",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "All data requests from the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "L2_DATA_RQSTS.SELF.E_STATE",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "All data requests from the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "L2_DATA_RQSTS.SELF.I_STATE",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "All data requests from the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "L2_DATA_RQSTS.SELF.MESI",
"SampleAfterValue": "200000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "All data requests from the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "L2_DATA_RQSTS.SELF.M_STATE",
"SampleAfterValue": "200000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "All data requests from the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "L2_DATA_RQSTS.SELF.S_STATE",
"SampleAfterValue": "200000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "Cycles the L2 cache data bus is busy.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "L2_DBUS_BUSY.SELF",
"SampleAfterValue": "200000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "Cycles the L2 transfers data to the core.",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "L2_DBUS_BUSY_RD.SELF",
"SampleAfterValue": "200000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "L2 cacheable instruction fetch requests",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "L2_IFETCH.SELF.E_STATE",
"SampleAfterValue": "200000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "L2 cacheable instruction fetch requests",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "L2_IFETCH.SELF.I_STATE",
"SampleAfterValue": "200000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "L2 cacheable instruction fetch requests",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "L2_IFETCH.SELF.MESI",
"SampleAfterValue": "200000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "L2 cacheable instruction fetch requests",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "L2_IFETCH.SELF.M_STATE",
"SampleAfterValue": "200000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "L2 cacheable instruction fetch requests",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "L2_IFETCH.SELF.S_STATE",
"SampleAfterValue": "200000",
@@ -141,6 +161,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.ANY.E_STATE",
"SampleAfterValue": "200000",
@@ -148,6 +169,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.ANY.I_STATE",
"SampleAfterValue": "200000",
@@ -155,6 +177,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.ANY.MESI",
"SampleAfterValue": "200000",
@@ -162,6 +185,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.ANY.M_STATE",
"SampleAfterValue": "200000",
@@ -169,6 +193,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.ANY.S_STATE",
"SampleAfterValue": "200000",
@@ -176,6 +201,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.DEMAND.E_STATE",
"SampleAfterValue": "200000",
@@ -183,6 +209,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.DEMAND.I_STATE",
"SampleAfterValue": "200000",
@@ -190,6 +217,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.DEMAND.MESI",
"SampleAfterValue": "200000",
@@ -197,6 +225,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.DEMAND.M_STATE",
"SampleAfterValue": "200000",
@@ -204,6 +233,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.DEMAND.S_STATE",
"SampleAfterValue": "200000",
@@ -211,6 +241,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
@@ -218,6 +249,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
@@ -225,6 +257,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.PREFETCH.MESI",
"SampleAfterValue": "200000",
@@ -232,6 +265,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
@@ -239,6 +273,7 @@
},
{
"BriefDescription": "L2 cache reads",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "L2_LD.SELF.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
@@ -246,6 +281,7 @@
},
{
"BriefDescription": "All read requests from L1 instruction and data caches",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "L2_LD_IFETCH.SELF.E_STATE",
"SampleAfterValue": "200000",
@@ -253,6 +289,7 @@
},
{
"BriefDescription": "All read requests from L1 instruction and data caches",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "L2_LD_IFETCH.SELF.I_STATE",
"SampleAfterValue": "200000",
@@ -260,6 +297,7 @@
},
{
"BriefDescription": "All read requests from L1 instruction and data caches",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "L2_LD_IFETCH.SELF.MESI",
"SampleAfterValue": "200000",
@@ -267,6 +305,7 @@
},
{
"BriefDescription": "All read requests from L1 instruction and data caches",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "L2_LD_IFETCH.SELF.M_STATE",
"SampleAfterValue": "200000",
@@ -274,6 +313,7 @@
},
{
"BriefDescription": "All read requests from L1 instruction and data caches",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "L2_LD_IFETCH.SELF.S_STATE",
"SampleAfterValue": "200000",
@@ -281,6 +321,7 @@
},
{
"BriefDescription": "L2 cache misses.",
+ "Counter": "0,1",
"EventCode": "0x24",
"EventName": "L2_LINES_IN.SELF.ANY",
"SampleAfterValue": "200000",
@@ -288,6 +329,7 @@
},
{
"BriefDescription": "L2 cache misses.",
+ "Counter": "0,1",
"EventCode": "0x24",
"EventName": "L2_LINES_IN.SELF.DEMAND",
"SampleAfterValue": "200000",
@@ -295,6 +337,7 @@
},
{
"BriefDescription": "L2 cache misses.",
+ "Counter": "0,1",
"EventCode": "0x24",
"EventName": "L2_LINES_IN.SELF.PREFETCH",
"SampleAfterValue": "200000",
@@ -302,6 +345,7 @@
},
{
"BriefDescription": "L2 cache lines evicted.",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.SELF.ANY",
"SampleAfterValue": "200000",
@@ -309,6 +353,7 @@
},
{
"BriefDescription": "L2 cache lines evicted.",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.SELF.DEMAND",
"SampleAfterValue": "200000",
@@ -316,6 +361,7 @@
},
{
"BriefDescription": "L2 cache lines evicted.",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.SELF.PREFETCH",
"SampleAfterValue": "200000",
@@ -323,6 +369,7 @@
},
{
"BriefDescription": "L2 locked accesses",
+ "Counter": "0,1",
"EventCode": "0x2B",
"EventName": "L2_LOCK.SELF.E_STATE",
"SampleAfterValue": "200000",
@@ -330,6 +377,7 @@
},
{
"BriefDescription": "L2 locked accesses",
+ "Counter": "0,1",
"EventCode": "0x2B",
"EventName": "L2_LOCK.SELF.I_STATE",
"SampleAfterValue": "200000",
@@ -337,6 +385,7 @@
},
{
"BriefDescription": "L2 locked accesses",
+ "Counter": "0,1",
"EventCode": "0x2B",
"EventName": "L2_LOCK.SELF.MESI",
"SampleAfterValue": "200000",
@@ -344,6 +393,7 @@
},
{
"BriefDescription": "L2 locked accesses",
+ "Counter": "0,1",
"EventCode": "0x2B",
"EventName": "L2_LOCK.SELF.M_STATE",
"SampleAfterValue": "200000",
@@ -351,6 +401,7 @@
},
{
"BriefDescription": "L2 locked accesses",
+ "Counter": "0,1",
"EventCode": "0x2B",
"EventName": "L2_LOCK.SELF.S_STATE",
"SampleAfterValue": "200000",
@@ -358,6 +409,7 @@
},
{
"BriefDescription": "L2 cache line modifications.",
+ "Counter": "0,1",
"EventCode": "0x25",
"EventName": "L2_M_LINES_IN.SELF",
"SampleAfterValue": "200000",
@@ -365,6 +417,7 @@
},
{
"BriefDescription": "Modified lines evicted from the L2 cache",
+ "Counter": "0,1",
"EventCode": "0x27",
"EventName": "L2_M_LINES_OUT.SELF.ANY",
"SampleAfterValue": "200000",
@@ -372,6 +425,7 @@
},
{
"BriefDescription": "Modified lines evicted from the L2 cache",
+ "Counter": "0,1",
"EventCode": "0x27",
"EventName": "L2_M_LINES_OUT.SELF.DEMAND",
"SampleAfterValue": "200000",
@@ -379,6 +433,7 @@
},
{
"BriefDescription": "Modified lines evicted from the L2 cache",
+ "Counter": "0,1",
"EventCode": "0x27",
"EventName": "L2_M_LINES_OUT.SELF.PREFETCH",
"SampleAfterValue": "200000",
@@ -386,6 +441,7 @@
},
{
"BriefDescription": "Cycles no L2 cache requests are pending",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "L2_NO_REQ.SELF",
"SampleAfterValue": "200000",
@@ -393,6 +449,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.ANY.E_STATE",
"SampleAfterValue": "200000",
@@ -400,6 +457,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.ANY.I_STATE",
"SampleAfterValue": "200000",
@@ -407,6 +465,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.ANY.MESI",
"SampleAfterValue": "200000",
@@ -414,6 +473,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.ANY.M_STATE",
"SampleAfterValue": "200000",
@@ -421,6 +481,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.ANY.S_STATE",
"SampleAfterValue": "200000",
@@ -428,6 +489,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.DEMAND.E_STATE",
"SampleAfterValue": "200000",
@@ -435,6 +497,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.DEMAND.I_STATE",
"SampleAfterValue": "200000",
@@ -442,6 +505,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.DEMAND.MESI",
"SampleAfterValue": "200000",
@@ -449,6 +513,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.DEMAND.M_STATE",
"SampleAfterValue": "200000",
@@ -456,6 +521,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.DEMAND.S_STATE",
"SampleAfterValue": "200000",
@@ -463,6 +529,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
@@ -470,6 +537,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
@@ -477,6 +545,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.MESI",
"SampleAfterValue": "200000",
@@ -484,6 +553,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
@@ -491,6 +561,7 @@
},
{
"BriefDescription": "Rejected L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
@@ -498,6 +569,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.ANY.E_STATE",
"SampleAfterValue": "200000",
@@ -505,6 +577,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.ANY.I_STATE",
"SampleAfterValue": "200000",
@@ -512,6 +585,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.ANY.MESI",
"SampleAfterValue": "200000",
@@ -519,6 +593,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.ANY.M_STATE",
"SampleAfterValue": "200000",
@@ -526,6 +601,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.ANY.S_STATE",
"SampleAfterValue": "200000",
@@ -533,6 +609,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.DEMAND.E_STATE",
"SampleAfterValue": "200000",
@@ -540,6 +617,7 @@
},
{
"BriefDescription": "L2 cache demand requests from this core that missed the L2",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.DEMAND.I_STATE",
"SampleAfterValue": "200000",
@@ -547,6 +625,7 @@
},
{
"BriefDescription": "L2 cache demand requests from this core",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.DEMAND.MESI",
"SampleAfterValue": "200000",
@@ -554,6 +633,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.DEMAND.M_STATE",
"SampleAfterValue": "200000",
@@ -561,6 +641,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.DEMAND.S_STATE",
"SampleAfterValue": "200000",
@@ -568,6 +649,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
@@ -575,6 +657,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
@@ -582,6 +665,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.PREFETCH.MESI",
"SampleAfterValue": "200000",
@@ -589,6 +673,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
@@ -596,6 +681,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_RQSTS.SELF.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
@@ -603,6 +689,7 @@
},
{
"BriefDescription": "L2 store requests",
+ "Counter": "0,1",
"EventCode": "0x2A",
"EventName": "L2_ST.SELF.E_STATE",
"SampleAfterValue": "200000",
@@ -610,6 +697,7 @@
},
{
"BriefDescription": "L2 store requests",
+ "Counter": "0,1",
"EventCode": "0x2A",
"EventName": "L2_ST.SELF.I_STATE",
"SampleAfterValue": "200000",
@@ -617,6 +705,7 @@
},
{
"BriefDescription": "L2 store requests",
+ "Counter": "0,1",
"EventCode": "0x2A",
"EventName": "L2_ST.SELF.MESI",
"SampleAfterValue": "200000",
@@ -624,6 +713,7 @@
},
{
"BriefDescription": "L2 store requests",
+ "Counter": "0,1",
"EventCode": "0x2A",
"EventName": "L2_ST.SELF.M_STATE",
"SampleAfterValue": "200000",
@@ -631,6 +721,7 @@
},
{
"BriefDescription": "L2 store requests",
+ "Counter": "0,1",
"EventCode": "0x2A",
"EventName": "L2_ST.SELF.S_STATE",
"SampleAfterValue": "200000",
@@ -638,6 +729,7 @@
},
{
"BriefDescription": "Retired loads that hit the L2 cache (precise event).",
+ "Counter": "0,1",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
"SampleAfterValue": "200000",
@@ -645,6 +737,7 @@
},
{
"BriefDescription": "Retired loads that miss the L2 cache",
+ "Counter": "0,1",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
"SampleAfterValue": "10000",
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/counter.json b/tools/perf/pmu-events/arch/x86/bonnell/counter.json
new file mode 100644
index 000000000000..eb89b55f31bd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/bonnell/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json b/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
index 18bf5ec47e72..d1bd5be95a15 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Floating point assists for retired operations.",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "FP_ASSIST.AR",
"SampleAfterValue": "10000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Floating point assists.",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "FP_ASSIST.S",
"SampleAfterValue": "10000",
@@ -15,12 +17,14 @@
},
{
"BriefDescription": "SIMD assists invoked.",
+ "Counter": "0,1",
"EventCode": "0xCD",
"EventName": "SIMD_ASSIST",
"SampleAfterValue": "100000"
},
{
"BriefDescription": "Retired computational Streaming SIMD Extensions (SSE) packed-single instructions.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "SIMD_COMP_INST_RETIRED.PACKED_SINGLE",
"SampleAfterValue": "2000000",
@@ -28,6 +32,7 @@
},
{
"BriefDescription": "Retired computational Streaming SIMD Extensions 2 (SSE2) scalar-double instructions.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "SIMD_COMP_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000000",
@@ -35,6 +40,7 @@
},
{
"BriefDescription": "Retired computational Streaming SIMD Extensions (SSE) scalar-single instructions.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "SIMD_COMP_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000000",
@@ -42,12 +48,14 @@
},
{
"BriefDescription": "SIMD Instructions retired.",
+ "Counter": "0,1",
"EventCode": "0xCE",
"EventName": "SIMD_INSTR_RETIRED",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Retired Streaming SIMD Extensions (SSE) packed-single instructions.",
+ "Counter": "0,1",
"EventCode": "0xC7",
"EventName": "SIMD_INST_RETIRED.PACKED_SINGLE",
"SampleAfterValue": "2000000",
@@ -55,6 +63,7 @@
},
{
"BriefDescription": "Retired Streaming SIMD Extensions 2 (SSE2) scalar-double instructions.",
+ "Counter": "0,1",
"EventCode": "0xC7",
"EventName": "SIMD_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000000",
@@ -62,6 +71,7 @@
},
{
"BriefDescription": "Retired Streaming SIMD Extensions (SSE) scalar-single instructions.",
+ "Counter": "0,1",
"EventCode": "0xC7",
"EventName": "SIMD_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000000",
@@ -69,6 +79,7 @@
},
{
"BriefDescription": "Retired Streaming SIMD Extensions 2 (SSE2) vector instructions.",
+ "Counter": "0,1",
"EventCode": "0xC7",
"EventName": "SIMD_INST_RETIRED.VECTOR",
"SampleAfterValue": "2000000",
@@ -76,12 +87,14 @@
},
{
"BriefDescription": "Saturated arithmetic instructions retired.",
+ "Counter": "0,1",
"EventCode": "0xCF",
"EventName": "SIMD_SAT_INSTR_RETIRED",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "SIMD saturated arithmetic micro-ops retired.",
+ "Counter": "0,1",
"EventCode": "0xB1",
"EventName": "SIMD_SAT_UOP_EXEC.AR",
"SampleAfterValue": "2000000",
@@ -89,12 +102,14 @@
},
{
"BriefDescription": "SIMD saturated arithmetic micro-ops executed.",
+ "Counter": "0,1",
"EventCode": "0xB1",
"EventName": "SIMD_SAT_UOP_EXEC.S",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "SIMD micro-ops retired (excluding stores).",
+ "Counter": "0,1",
"EventCode": "0xB0",
"EventName": "SIMD_UOPS_EXEC.AR",
"PEBS": "2",
@@ -103,12 +118,14 @@
},
{
"BriefDescription": "SIMD micro-ops executed (excluding stores).",
+ "Counter": "0,1",
"EventCode": "0xB0",
"EventName": "SIMD_UOPS_EXEC.S",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "SIMD packed arithmetic micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.ARITHMETIC.AR",
"SampleAfterValue": "2000000",
@@ -116,6 +133,7 @@
},
{
"BriefDescription": "SIMD packed arithmetic micro-ops executed",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.ARITHMETIC.S",
"SampleAfterValue": "2000000",
@@ -123,6 +141,7 @@
},
{
"BriefDescription": "SIMD packed logical micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.LOGICAL.AR",
"SampleAfterValue": "2000000",
@@ -130,6 +149,7 @@
},
{
"BriefDescription": "SIMD packed logical micro-ops executed",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.LOGICAL.S",
"SampleAfterValue": "2000000",
@@ -137,6 +157,7 @@
},
{
"BriefDescription": "SIMD packed multiply micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.MUL.AR",
"SampleAfterValue": "2000000",
@@ -144,6 +165,7 @@
},
{
"BriefDescription": "SIMD packed multiply micro-ops executed",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.MUL.S",
"SampleAfterValue": "2000000",
@@ -151,6 +173,7 @@
},
{
"BriefDescription": "SIMD packed micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.PACK.AR",
"SampleAfterValue": "2000000",
@@ -158,6 +181,7 @@
},
{
"BriefDescription": "SIMD packed micro-ops executed",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.PACK.S",
"SampleAfterValue": "2000000",
@@ -165,6 +189,7 @@
},
{
"BriefDescription": "SIMD packed shift micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.SHIFT.AR",
"SampleAfterValue": "2000000",
@@ -172,6 +197,7 @@
},
{
"BriefDescription": "SIMD packed shift micro-ops executed",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.SHIFT.S",
"SampleAfterValue": "2000000",
@@ -179,6 +205,7 @@
},
{
"BriefDescription": "SIMD unpacked micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.UNPACK.AR",
"SampleAfterValue": "2000000",
@@ -186,6 +213,7 @@
},
{
"BriefDescription": "SIMD unpacked micro-ops executed",
+ "Counter": "0,1",
"EventCode": "0xB3",
"EventName": "SIMD_UOP_TYPE_EXEC.UNPACK.S",
"SampleAfterValue": "2000000",
@@ -193,6 +221,7 @@
},
{
"BriefDescription": "Floating point computational micro-ops retired.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "X87_COMP_OPS_EXE.ANY.AR",
"PEBS": "2",
@@ -201,6 +230,7 @@
},
{
"BriefDescription": "Floating point computational micro-ops executed.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "X87_COMP_OPS_EXE.ANY.S",
"SampleAfterValue": "2000000",
@@ -208,6 +238,7 @@
},
{
"BriefDescription": "FXCH uops retired.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "X87_COMP_OPS_EXE.FXCH.AR",
"PEBS": "2",
@@ -216,6 +247,7 @@
},
{
"BriefDescription": "FXCH uops executed.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "X87_COMP_OPS_EXE.FXCH.S",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
index 42284c02c11d..7657fd6d3a11 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "BACLEARS asserted.",
+ "Counter": "0,1",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles during which instruction fetches are stalled.",
+ "Counter": "0,1",
"EventCode": "0x86",
"EventName": "CYCLES_ICACHE_MEM_STALLED.ICACHE_MEM_STALLED",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Decode stall due to IQ full",
+ "Counter": "0,1",
"EventCode": "0x87",
"EventName": "DECODE_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "Decode stall due to PFB empty",
+ "Counter": "0,1",
"EventCode": "0x87",
"EventName": "DECODE_STALL.PFB_EMPTY",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "Instruction fetches.",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "Icache hit",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "Icache miss",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "All Instructions decoded",
+ "Counter": "0,1",
"EventCode": "0xAA",
"EventName": "MACRO_INSTS.ALL_DECODED",
"SampleAfterValue": "2000000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "CISC macro instructions decoded",
+ "Counter": "0,1",
"EventCode": "0xAA",
"EventName": "MACRO_INSTS.CISC_DECODED",
"SampleAfterValue": "2000000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "Non-CISC macro instructions decoded",
+ "Counter": "0,1",
"EventCode": "0xAA",
"EventName": "MACRO_INSTS.NON_CISC_DECODED",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ.",
+ "Counter": "0,1",
"CounterMask": "1",
"EventCode": "0xA9",
"EventName": "UOPS.MS_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/memory.json b/tools/perf/pmu-events/arch/x86/bonnell/memory.json
index ac02dc2482c8..f8b45b6fb4d3 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Nonzero segbase 1 bubble",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.BUBBLE",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Nonzero segbase load 1 bubble",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.LD_BUBBLE",
"SampleAfterValue": "200000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Load splits",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.LD_SPLIT",
"SampleAfterValue": "200000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "Load splits (At Retirement)",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.LD_SPLIT.AR",
"SampleAfterValue": "200000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "Nonzero segbase ld-op-st 1 bubble",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.RMW_BUBBLE",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "ld-op-st splits",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.RMW_SPLIT",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "Memory references that cross an 8-byte boundary.",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.SPLIT",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "Memory references that cross an 8-byte boundary (At Retirement)",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.SPLIT.AR",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "Nonzero segbase store 1 bubble",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.ST_BUBBLE",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "Store splits",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.ST_SPLIT",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Store splits (Ar Retirement)",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.ST_SPLIT.AR",
"SampleAfterValue": "200000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "L1 hardware prefetch request",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "PREFETCH.HW_PREFETCH",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "Streaming SIMD Extensions (SSE) Prefetch NTA instructions executed",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "PREFETCH.PREFETCHNTA",
"SampleAfterValue": "200000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT0 instructions executed.",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "PREFETCH.PREFETCHT0",
"SampleAfterValue": "200000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT1 instructions executed.",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "PREFETCH.PREFETCHT1",
"SampleAfterValue": "200000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT2 instructions executed.",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "PREFETCH.PREFETCHT2",
"SampleAfterValue": "200000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "Any Software prefetch",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "PREFETCH.SOFTWARE_PREFETCH",
"SampleAfterValue": "200000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "Any Software prefetch",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "PREFETCH.SOFTWARE_PREFETCH.AR",
"SampleAfterValue": "200000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT1 and PrefetchT2 instructions executed",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "PREFETCH.SW_L2",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/other.json b/tools/perf/pmu-events/arch/x86/bonnell/other.json
index 782594c8bda5..3a55c101fbf7 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/other.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Bus queue is empty.",
+ "Counter": "0,1",
"EventCode": "0x7D",
"EventName": "BUSQ_EMPTY.SELF",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Number of Bus Not Ready signals asserted.",
+ "Counter": "0,1",
"EventCode": "0x61",
"EventName": "BUS_BNR_DRV.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -15,12 +17,14 @@
},
{
"BriefDescription": "Number of Bus Not Ready signals asserted.",
+ "Counter": "0,1",
"EventCode": "0x61",
"EventName": "BUS_BNR_DRV.THIS_AGENT",
"SampleAfterValue": "200000"
},
{
"BriefDescription": "Bus cycles while processor receives data.",
+ "Counter": "0,1",
"EventCode": "0x64",
"EventName": "BUS_DATA_RCV.SELF",
"SampleAfterValue": "200000",
@@ -28,6 +32,7 @@
},
{
"BriefDescription": "Bus cycles when data is sent on the bus.",
+ "Counter": "0,1",
"EventCode": "0x62",
"EventName": "BUS_DRDY_CLOCKS.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -35,12 +40,14 @@
},
{
"BriefDescription": "Bus cycles when data is sent on the bus.",
+ "Counter": "0,1",
"EventCode": "0x62",
"EventName": "BUS_DRDY_CLOCKS.THIS_AGENT",
"SampleAfterValue": "200000"
},
{
"BriefDescription": "HITM signal asserted.",
+ "Counter": "0,1",
"EventCode": "0x7B",
"EventName": "BUS_HITM_DRV.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -48,12 +55,14 @@
},
{
"BriefDescription": "HITM signal asserted.",
+ "Counter": "0,1",
"EventCode": "0x7B",
"EventName": "BUS_HITM_DRV.THIS_AGENT",
"SampleAfterValue": "200000"
},
{
"BriefDescription": "HIT signal asserted.",
+ "Counter": "0,1",
"EventCode": "0x7A",
"EventName": "BUS_HIT_DRV.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -61,12 +70,14 @@
},
{
"BriefDescription": "HIT signal asserted.",
+ "Counter": "0,1",
"EventCode": "0x7A",
"EventName": "BUS_HIT_DRV.THIS_AGENT",
"SampleAfterValue": "200000"
},
{
"BriefDescription": "IO requests waiting in the bus queue.",
+ "Counter": "0,1",
"EventCode": "0x7F",
"EventName": "BUS_IO_WAIT.SELF",
"SampleAfterValue": "200000",
@@ -74,6 +85,7 @@
},
{
"BriefDescription": "Bus cycles when a LOCK signal is asserted.",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "BUS_LOCK_CLOCKS.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -81,6 +93,7 @@
},
{
"BriefDescription": "Bus cycles when a LOCK signal is asserted.",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "BUS_LOCK_CLOCKS.SELF",
"SampleAfterValue": "200000",
@@ -88,6 +101,7 @@
},
{
"BriefDescription": "Outstanding cacheable data read bus requests duration.",
+ "Counter": "0,1",
"EventCode": "0x60",
"EventName": "BUS_REQUEST_OUTSTANDING.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -95,6 +109,7 @@
},
{
"BriefDescription": "Outstanding cacheable data read bus requests duration.",
+ "Counter": "0,1",
"EventCode": "0x60",
"EventName": "BUS_REQUEST_OUTSTANDING.SELF",
"SampleAfterValue": "200000",
@@ -102,6 +117,7 @@
},
{
"BriefDescription": "All bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x70",
"EventName": "BUS_TRANS_ANY.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -109,6 +125,7 @@
},
{
"BriefDescription": "All bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x70",
"EventName": "BUS_TRANS_ANY.SELF",
"SampleAfterValue": "200000",
@@ -116,6 +133,7 @@
},
{
"BriefDescription": "Burst read bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x65",
"EventName": "BUS_TRANS_BRD.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -123,6 +141,7 @@
},
{
"BriefDescription": "Burst read bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x65",
"EventName": "BUS_TRANS_BRD.SELF",
"SampleAfterValue": "200000",
@@ -130,6 +149,7 @@
},
{
"BriefDescription": "Burst (full cache-line) bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6E",
"EventName": "BUS_TRANS_BURST.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -137,6 +157,7 @@
},
{
"BriefDescription": "Burst (full cache-line) bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6E",
"EventName": "BUS_TRANS_BURST.SELF",
"SampleAfterValue": "200000",
@@ -144,6 +165,7 @@
},
{
"BriefDescription": "Deferred bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6D",
"EventName": "BUS_TRANS_DEF.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -151,6 +173,7 @@
},
{
"BriefDescription": "Deferred bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6D",
"EventName": "BUS_TRANS_DEF.SELF",
"SampleAfterValue": "200000",
@@ -158,6 +181,7 @@
},
{
"BriefDescription": "Instruction-fetch bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x68",
"EventName": "BUS_TRANS_IFETCH.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -165,6 +189,7 @@
},
{
"BriefDescription": "Instruction-fetch bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x68",
"EventName": "BUS_TRANS_IFETCH.SELF",
"SampleAfterValue": "200000",
@@ -172,6 +197,7 @@
},
{
"BriefDescription": "Invalidate bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x69",
"EventName": "BUS_TRANS_INVAL.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -179,6 +205,7 @@
},
{
"BriefDescription": "Invalidate bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x69",
"EventName": "BUS_TRANS_INVAL.SELF",
"SampleAfterValue": "200000",
@@ -186,6 +213,7 @@
},
{
"BriefDescription": "IO bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6C",
"EventName": "BUS_TRANS_IO.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -193,6 +221,7 @@
},
{
"BriefDescription": "IO bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6C",
"EventName": "BUS_TRANS_IO.SELF",
"SampleAfterValue": "200000",
@@ -200,6 +229,7 @@
},
{
"BriefDescription": "Memory bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6F",
"EventName": "BUS_TRANS_MEM.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -207,6 +237,7 @@
},
{
"BriefDescription": "Memory bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6F",
"EventName": "BUS_TRANS_MEM.SELF",
"SampleAfterValue": "200000",
@@ -214,6 +245,7 @@
},
{
"BriefDescription": "Partial bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6B",
"EventName": "BUS_TRANS_P.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -221,6 +253,7 @@
},
{
"BriefDescription": "Partial bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x6B",
"EventName": "BUS_TRANS_P.SELF",
"SampleAfterValue": "200000",
@@ -228,6 +261,7 @@
},
{
"BriefDescription": "Partial write bus transaction.",
+ "Counter": "0,1",
"EventCode": "0x6A",
"EventName": "BUS_TRANS_PWR.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -235,6 +269,7 @@
},
{
"BriefDescription": "Partial write bus transaction.",
+ "Counter": "0,1",
"EventCode": "0x6A",
"EventName": "BUS_TRANS_PWR.SELF",
"SampleAfterValue": "200000",
@@ -242,6 +277,7 @@
},
{
"BriefDescription": "RFO bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x66",
"EventName": "BUS_TRANS_RFO.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -249,6 +285,7 @@
},
{
"BriefDescription": "RFO bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x66",
"EventName": "BUS_TRANS_RFO.SELF",
"SampleAfterValue": "200000",
@@ -256,6 +293,7 @@
},
{
"BriefDescription": "Explicit writeback bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x67",
"EventName": "BUS_TRANS_WB.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -263,6 +301,7 @@
},
{
"BriefDescription": "Explicit writeback bus transactions.",
+ "Counter": "0,1",
"EventCode": "0x67",
"EventName": "BUS_TRANS_WB.SELF",
"SampleAfterValue": "200000",
@@ -270,6 +309,7 @@
},
{
"BriefDescription": "Cycles during which interrupts are disabled.",
+ "Counter": "0,1",
"EventCode": "0xC6",
"EventName": "CYCLES_INT_MASKED.CYCLES_INT_MASKED",
"SampleAfterValue": "2000000",
@@ -277,6 +317,7 @@
},
{
"BriefDescription": "Cycles during which interrupts are pending and disabled.",
+ "Counter": "0,1",
"EventCode": "0xC6",
"EventName": "CYCLES_INT_MASKED.CYCLES_INT_PENDING_AND_MASKED",
"SampleAfterValue": "2000000",
@@ -284,6 +325,7 @@
},
{
"BriefDescription": "Memory cluster signals to block micro-op dispatch for any reason",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "DISPATCH_BLOCKED.ANY",
"SampleAfterValue": "200000",
@@ -291,12 +333,14 @@
},
{
"BriefDescription": "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
+ "Counter": "0,1",
"EventCode": "0x3A",
"EventName": "EIST_TRANS",
"SampleAfterValue": "200000"
},
{
"BriefDescription": "External snoops.",
+ "Counter": "0,1",
"EventCode": "0x77",
"EventName": "EXT_SNOOP.ALL_AGENTS.ANY",
"SampleAfterValue": "200000",
@@ -304,6 +348,7 @@
},
{
"BriefDescription": "External snoops.",
+ "Counter": "0,1",
"EventCode": "0x77",
"EventName": "EXT_SNOOP.ALL_AGENTS.CLEAN",
"SampleAfterValue": "200000",
@@ -311,6 +356,7 @@
},
{
"BriefDescription": "External snoops.",
+ "Counter": "0,1",
"EventCode": "0x77",
"EventName": "EXT_SNOOP.ALL_AGENTS.HIT",
"SampleAfterValue": "200000",
@@ -318,6 +364,7 @@
},
{
"BriefDescription": "External snoops.",
+ "Counter": "0,1",
"EventCode": "0x77",
"EventName": "EXT_SNOOP.ALL_AGENTS.HITM",
"SampleAfterValue": "200000",
@@ -325,6 +372,7 @@
},
{
"BriefDescription": "External snoops.",
+ "Counter": "0,1",
"EventCode": "0x77",
"EventName": "EXT_SNOOP.THIS_AGENT.ANY",
"SampleAfterValue": "200000",
@@ -332,6 +380,7 @@
},
{
"BriefDescription": "External snoops.",
+ "Counter": "0,1",
"EventCode": "0x77",
"EventName": "EXT_SNOOP.THIS_AGENT.CLEAN",
"SampleAfterValue": "200000",
@@ -339,6 +388,7 @@
},
{
"BriefDescription": "External snoops.",
+ "Counter": "0,1",
"EventCode": "0x77",
"EventName": "EXT_SNOOP.THIS_AGENT.HIT",
"SampleAfterValue": "200000",
@@ -346,6 +396,7 @@
},
{
"BriefDescription": "External snoops.",
+ "Counter": "0,1",
"EventCode": "0x77",
"EventName": "EXT_SNOOP.THIS_AGENT.HITM",
"SampleAfterValue": "200000",
@@ -353,12 +404,14 @@
},
{
"BriefDescription": "Hardware interrupts received.",
+ "Counter": "0,1",
"EventCode": "0xC8",
"EventName": "HW_INT_RCV",
"SampleAfterValue": "200000"
},
{
"BriefDescription": "Number of segment register loads.",
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "SEGMENT_REG_LOADS.ANY",
"SampleAfterValue": "200000",
@@ -366,6 +419,7 @@
},
{
"BriefDescription": "Bus stalled for snoops.",
+ "Counter": "0,1",
"EventCode": "0x7E",
"EventName": "SNOOP_STALL_DRV.ALL_AGENTS",
"SampleAfterValue": "200000",
@@ -373,6 +427,7 @@
},
{
"BriefDescription": "Bus stalled for snoops.",
+ "Counter": "0,1",
"EventCode": "0x7E",
"EventName": "SNOOP_STALL_DRV.SELF",
"SampleAfterValue": "200000",
@@ -380,6 +435,7 @@
},
{
"BriefDescription": "Number of thermal trips",
+ "Counter": "0,1",
"EventCode": "0x3B",
"EventName": "THERMAL_TRIP",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
index 91b98ee8ba9a..9ff032ab11e2 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Bogus branches",
+ "Counter": "0,1",
"EventCode": "0xE4",
"EventName": "BOGUS_BR",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Branch instructions decoded",
+ "Counter": "0,1",
"EventCode": "0xE0",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
@@ -15,12 +17,14 @@
},
{
"BriefDescription": "Retired branch instructions.",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ANY",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Retired branch instructions.",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ANY1",
"SampleAfterValue": "2000000",
@@ -28,6 +32,7 @@
},
{
"BriefDescription": "Retired mispredicted branch instructions (precise event).",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_INST_RETIRED.MISPRED",
"PEBS": "1",
@@ -35,6 +40,7 @@
},
{
"BriefDescription": "Retired branch instructions that were mispredicted not-taken.",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.MISPRED_NOT_TAKEN",
"SampleAfterValue": "200000",
@@ -42,6 +48,7 @@
},
{
"BriefDescription": "Retired branch instructions that were mispredicted taken.",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.MISPRED_TAKEN",
"SampleAfterValue": "200000",
@@ -49,6 +56,7 @@
},
{
"BriefDescription": "Retired branch instructions that were predicted not-taken.",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.PRED_NOT_TAKEN",
"SampleAfterValue": "2000000",
@@ -56,6 +64,7 @@
},
{
"BriefDescription": "Retired branch instructions that were predicted taken.",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.PRED_TAKEN",
"SampleAfterValue": "2000000",
@@ -63,6 +72,7 @@
},
{
"BriefDescription": "Retired taken branch instructions.",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.TAKEN",
"SampleAfterValue": "2000000",
@@ -70,6 +80,7 @@
},
{
"BriefDescription": "All macro conditional branch instructions.",
+ "Counter": "0,1",
"EventCode": "0x88",
"EventName": "BR_INST_TYPE_RETIRED.COND",
"SampleAfterValue": "2000000",
@@ -77,6 +88,7 @@
},
{
"BriefDescription": "Only taken macro conditional branch instructions",
+ "Counter": "0,1",
"EventCode": "0x88",
"EventName": "BR_INST_TYPE_RETIRED.COND_TAKEN",
"SampleAfterValue": "2000000",
@@ -84,6 +96,7 @@
},
{
"BriefDescription": "All non-indirect calls",
+ "Counter": "0,1",
"EventCode": "0x88",
"EventName": "BR_INST_TYPE_RETIRED.DIR_CALL",
"SampleAfterValue": "2000000",
@@ -91,6 +104,7 @@
},
{
"BriefDescription": "All indirect branches that are not calls.",
+ "Counter": "0,1",
"EventCode": "0x88",
"EventName": "BR_INST_TYPE_RETIRED.IND",
"SampleAfterValue": "2000000",
@@ -98,6 +112,7 @@
},
{
"BriefDescription": "All indirect calls, including both register and memory indirect.",
+ "Counter": "0,1",
"EventCode": "0x88",
"EventName": "BR_INST_TYPE_RETIRED.IND_CALL",
"SampleAfterValue": "2000000",
@@ -105,6 +120,7 @@
},
{
"BriefDescription": "All indirect branches that have a return mnemonic",
+ "Counter": "0,1",
"EventCode": "0x88",
"EventName": "BR_INST_TYPE_RETIRED.RET",
"SampleAfterValue": "2000000",
@@ -112,6 +128,7 @@
},
{
"BriefDescription": "All macro unconditional branch instructions, excluding calls and indirects",
+ "Counter": "0,1",
"EventCode": "0x88",
"EventName": "BR_INST_TYPE_RETIRED.UNCOND",
"SampleAfterValue": "2000000",
@@ -119,6 +136,7 @@
},
{
"BriefDescription": "Mispredicted cond branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0x89",
"EventName": "BR_MISSP_TYPE_RETIRED.COND",
"SampleAfterValue": "200000",
@@ -126,6 +144,7 @@
},
{
"BriefDescription": "Mispredicted and taken cond branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0x89",
"EventName": "BR_MISSP_TYPE_RETIRED.COND_TAKEN",
"SampleAfterValue": "200000",
@@ -133,6 +152,7 @@
},
{
"BriefDescription": "Mispredicted ind branches that are not calls",
+ "Counter": "0,1",
"EventCode": "0x89",
"EventName": "BR_MISSP_TYPE_RETIRED.IND",
"SampleAfterValue": "200000",
@@ -140,6 +160,7 @@
},
{
"BriefDescription": "Mispredicted indirect calls, including both register and memory indirect.",
+ "Counter": "0,1",
"EventCode": "0x89",
"EventName": "BR_MISSP_TYPE_RETIRED.IND_CALL",
"SampleAfterValue": "200000",
@@ -147,6 +168,7 @@
},
{
"BriefDescription": "Mispredicted return branches",
+ "Counter": "0,1",
"EventCode": "0x89",
"EventName": "BR_MISSP_TYPE_RETIRED.RETURN",
"SampleAfterValue": "200000",
@@ -154,6 +176,7 @@
},
{
"BriefDescription": "Bus cycles when core is not halted",
+ "Counter": "0,1",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.BUS",
"SampleAfterValue": "200000",
@@ -161,24 +184,28 @@
},
{
"BriefDescription": "Core cycles when core is not halted",
+ "Counter": "Fixed counter 2",
"EventCode": "0xA",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Core cycles when core is not halted",
+ "Counter": "0,1",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Reference cycles when core is not halted.",
+ "Counter": "Fixed counter 3",
"EventCode": "0xA",
"EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Cycles the divider is busy.",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
@@ -186,6 +213,7 @@
},
{
"BriefDescription": "Divide operations retired",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "DIV.AR",
"SampleAfterValue": "2000000",
@@ -193,6 +221,7 @@
},
{
"BriefDescription": "Divide operations executed.",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "DIV.S",
"SampleAfterValue": "2000000",
@@ -200,12 +229,14 @@
},
{
"BriefDescription": "Instructions retired.",
+ "Counter": "Fixed counter 1",
"EventCode": "0xA",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Instructions retired (precise event).",
+ "Counter": "0,1",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "2",
@@ -213,6 +244,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected.",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "200000",
@@ -220,6 +252,7 @@
},
{
"BriefDescription": "Multiply operations retired",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "MUL.AR",
"SampleAfterValue": "2000000",
@@ -227,6 +260,7 @@
},
{
"BriefDescription": "Multiply operations executed.",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "MUL.S",
"SampleAfterValue": "2000000",
@@ -234,6 +268,7 @@
},
{
"BriefDescription": "Micro-op reissues for any cause",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "REISSUE.ANY",
"SampleAfterValue": "200000",
@@ -241,6 +276,7 @@
},
{
"BriefDescription": "Micro-op reissues for any cause (At Retirement)",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "REISSUE.ANY.AR",
"SampleAfterValue": "200000",
@@ -248,6 +284,7 @@
},
{
"BriefDescription": "Micro-op reissues on a store-load collision",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "REISSUE.OVERLAP_STORE",
"SampleAfterValue": "200000",
@@ -255,6 +292,7 @@
},
{
"BriefDescription": "Micro-op reissues on a store-load collision (At Retirement)",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "REISSUE.OVERLAP_STORE.AR",
"SampleAfterValue": "200000",
@@ -262,6 +300,7 @@
},
{
"BriefDescription": "Cycles issue is stalled due to div busy.",
+ "Counter": "0,1",
"EventCode": "0xDC",
"EventName": "RESOURCE_STALLS.DIV_BUSY",
"SampleAfterValue": "2000000",
@@ -269,6 +308,7 @@
},
{
"BriefDescription": "All store forwards",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "STORE_FORWARDS.ANY",
"SampleAfterValue": "200000",
@@ -276,6 +316,7 @@
},
{
"BriefDescription": "Good store forwards",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "STORE_FORWARDS.GOOD",
"SampleAfterValue": "200000",
@@ -283,6 +324,7 @@
},
{
"BriefDescription": "Micro-ops retired.",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
"SampleAfterValue": "2000000",
@@ -290,6 +332,7 @@
},
{
"BriefDescription": "Cycles no micro-ops retired.",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALLED_CYCLES",
"SampleAfterValue": "2000000",
@@ -297,6 +340,7 @@
},
{
"BriefDescription": "Periods no micro-ops retired.",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALLS",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
index 82e07c73cff0..e8512c585572 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Memory accesses that missed the DTLB.",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "DATA_TLB_MISSES.DTLB_MISS",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "DTLB misses due to load operations.",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "DATA_TLB_MISSES.DTLB_MISS_LD",
"SampleAfterValue": "200000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "DTLB misses due to store operations.",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "DATA_TLB_MISSES.DTLB_MISS_ST",
"SampleAfterValue": "200000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L0 DTLB misses due to load operations.",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "DATA_TLB_MISSES.L0_DTLB_MISS_LD",
"SampleAfterValue": "200000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L0 DTLB misses due to store operations",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "DATA_TLB_MISSES.L0_DTLB_MISS_ST",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "ITLB flushes.",
+ "Counter": "0,1",
"EventCode": "0x82",
"EventName": "ITLB.FLUSH",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "ITLB hits.",
+ "Counter": "0,1",
"EventCode": "0x82",
"EventName": "ITLB.HIT",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "ITLB misses.",
+ "Counter": "0,1",
"EventCode": "0x82",
"EventName": "ITLB.MISSES",
"PEBS": "2",
@@ -58,6 +66,7 @@
},
{
"BriefDescription": "Retired loads that miss the DTLB (precise event).",
+ "Counter": "0,1",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
"PEBS": "1",
@@ -66,6 +75,7 @@
},
{
"BriefDescription": "Duration of page-walks in core cycles",
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "PAGE_WALKS.CYCLES",
"SampleAfterValue": "2000000",
@@ -73,6 +83,7 @@
},
{
"BriefDescription": "Duration of D-side only page walks",
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "PAGE_WALKS.D_SIDE_CYCLES",
"SampleAfterValue": "2000000",
@@ -80,6 +91,7 @@
},
{
"BriefDescription": "Number of D-side only page walks",
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "PAGE_WALKS.D_SIDE_WALKS",
"SampleAfterValue": "200000",
@@ -87,6 +99,7 @@
},
{
"BriefDescription": "Duration of I-Side page walks",
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "PAGE_WALKS.I_SIDE_CYCLES",
"SampleAfterValue": "2000000",
@@ -94,6 +107,7 @@
},
{
"BriefDescription": "Number of I-Side page walks",
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "PAGE_WALKS.I_SIDE_WALKS",
"SampleAfterValue": "200000",
@@ -101,6 +115,7 @@
},
{
"BriefDescription": "Number of page-walks executed.",
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "PAGE_WALKS.WALKS",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
index c20833fb1f58..af620553f958 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -90,7 +90,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -100,7 +100,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -121,7 +121,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -160,7 +160,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -181,7 +181,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -190,7 +190,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
@@ -227,7 +227,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -236,7 +236,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
@@ -245,7 +245,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -255,7 +255,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -292,7 +292,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -301,7 +301,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -329,7 +329,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -349,7 +349,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
@@ -388,7 +388,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -428,7 +428,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -502,12 +502,12 @@
"MetricThreshold": "tma_info_inst_mix_ipstore < 8"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -528,7 +528,7 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
@@ -540,7 +540,7 @@
"MetricName": "tma_info_memory_l1mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
@@ -576,7 +576,13 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
@@ -628,7 +634,7 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
@@ -647,13 +653,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
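Read together, the swap above untangles the two system metrics: the ratio of unhalted reference cycles to TSC now lands in cpus_utilized, and the percentage form divides that by the online CPU count. As a sketch of the arithmetic implied by the expressions shown (values illustrative only):

\mathrm{tma\_info\_system\_cpus\_utilized} = \frac{\mathrm{CPU\_CLK\_UNHALTED.REF\_TSC}}{\mathrm{TSC}}, \qquad
\mathrm{tma\_info\_system\_cpu\_utilization} = \frac{\mathrm{tma\_info\_system\_cpus\_utilized}}{\#\mathrm{num\_cpus\_online}}

For example, if the reference-cycle sum across all logical CPUs equals twice the elapsed TSC on an 8-CPU system, cpus_utilized is 2 and cpu_utilization is 2 / 8 = 0.25.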
@@ -748,7 +754,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -757,7 +763,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -775,7 +781,7 @@
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
@@ -795,7 +801,7 @@
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
@@ -844,7 +850,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -854,7 +860,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -863,7 +869,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
@@ -892,7 +898,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
@@ -1028,7 +1034,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -1036,7 +1042,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -1065,7 +1071,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1093,7 +1099,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -1110,7 +1116,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index f8ee5aefccea..063ec8c2b2a1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L1D miss outstandings duration in cycles",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -35,6 +39,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_DEMAND_RQSTS.WB_HIT",
"PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100003",
@@ -90,6 +101,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "This event counts the total number of L2 code requests.",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"SampleAfterValue": "200003",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
@@ -143,6 +161,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
@@ -150,6 +169,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
@@ -158,6 +178,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -166,6 +187,7 @@
},
{
"BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_HIT",
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
@@ -174,6 +196,7 @@
},
{
"BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_MISS",
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
@@ -182,6 +205,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200003",
@@ -189,6 +213,7 @@
},
{
"BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200003",
@@ -196,6 +221,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
@@ -203,6 +229,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
@@ -210,6 +237,7 @@
},
{
"BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_PF",
"PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
@@ -218,6 +246,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
@@ -226,6 +255,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.CODE_RD",
"PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
@@ -234,6 +264,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
@@ -242,6 +273,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L1D_WB",
"PublicDescription": "This event counts L1D writebacks that access L2 cache.",
@@ -250,6 +282,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_FILL",
"PublicDescription": "This event counts L2 fill requests that access L2 cache.",
@@ -258,6 +291,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "This event counts L2 writebacks that access L2 cache.",
@@ -266,6 +300,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.RFO",
"PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
@@ -274,6 +309,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
@@ -282,6 +318,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
@@ -290,6 +327,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
@@ -298,6 +336,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -309,6 +348,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -320,6 +360,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -331,6 +372,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -342,6 +384,7 @@
},
{
"BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70, BDM100",
"EventCode": "0xD3",
@@ -353,6 +396,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
@@ -363,6 +407,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
@@ -373,6 +418,7 @@
},
{
"BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
@@ -383,6 +429,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM35",
"EventCode": "0xD1",
@@ -394,6 +441,7 @@
},
{
"BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
@@ -404,6 +452,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD1",
@@ -415,6 +464,7 @@
},
{
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100, BDE70",
"EventCode": "0xD1",
@@ -425,6 +475,7 @@
},
{
"BriefDescription": "Retired load uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -435,6 +486,7 @@
},
{
"BriefDescription": "Retired store uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -445,6 +497,7 @@
},
{
"BriefDescription": "Retired load uops with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM35",
"EventCode": "0xD0",
@@ -456,6 +509,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -466,6 +520,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
@@ -476,6 +531,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
@@ -486,6 +542,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
@@ -496,6 +553,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -504,6 +562,7 @@
},
{
"BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
@@ -512,6 +571,7 @@
},
{
"BriefDescription": "Cacheable and non-cacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "This event counts both cacheable and non-cacheable code read requests.",
@@ -520,6 +580,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -528,6 +589,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -536,6 +598,7 @@
},
{
"BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
@@ -544,6 +607,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
@@ -553,6 +617,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -563,6 +628,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -573,6 +639,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -583,6 +650,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
@@ -592,6 +660,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
@@ -601,6 +670,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -610,6 +680,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
@@ -619,6 +690,7 @@
},
{
"BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
@@ -626,6 +698,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -635,6 +708,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -644,6 +718,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -653,6 +728,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -662,6 +738,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -671,6 +748,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -680,6 +758,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -689,6 +768,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -698,6 +778,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -707,6 +788,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -716,6 +798,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -725,6 +808,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -734,6 +818,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -743,6 +828,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -752,6 +838,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -761,6 +848,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -770,6 +858,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -779,6 +868,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -788,6 +878,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -797,6 +888,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -806,6 +898,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -815,6 +908,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -824,6 +918,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -833,6 +928,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -842,6 +938,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -851,6 +948,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -860,6 +958,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -869,6 +968,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -878,6 +978,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -887,6 +988,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -896,6 +998,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -905,6 +1008,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -914,6 +1018,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -923,6 +1028,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -932,6 +1038,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -941,6 +1048,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -950,6 +1058,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -959,6 +1068,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -968,6 +1078,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -977,6 +1088,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -986,6 +1098,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -995,6 +1108,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1004,6 +1118,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1013,6 +1128,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1022,6 +1138,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1031,6 +1148,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1040,6 +1158,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1049,6 +1168,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1058,6 +1178,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1067,6 +1188,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1076,6 +1198,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1085,6 +1208,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1094,6 +1218,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1103,6 +1228,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1112,6 +1238,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1121,6 +1248,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1130,6 +1258,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1139,6 +1268,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1148,6 +1278,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1157,6 +1288,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1166,6 +1298,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1175,6 +1308,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1184,6 +1318,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1193,6 +1328,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1202,6 +1338,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1211,6 +1348,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive) have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1220,6 +1358,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1229,6 +1368,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1238,6 +1378,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1247,6 +1388,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1256,6 +1398,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1265,6 +1408,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1274,6 +1418,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1283,6 +1428,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1292,6 +1438,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1301,6 +1448,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1310,6 +1458,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1319,6 +1468,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1328,6 +1478,7 @@
},
{
"BriefDescription": "Counts all demand code reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1337,6 +1488,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1346,6 +1498,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1355,6 +1508,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1364,6 +1518,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1373,6 +1528,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1382,6 +1538,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1391,6 +1548,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1400,6 +1558,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1409,6 +1568,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1418,6 +1578,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1427,6 +1588,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1436,6 +1598,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1445,6 +1608,7 @@
},
{
"BriefDescription": "Counts demand data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1454,6 +1618,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1463,6 +1628,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1472,6 +1638,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1481,6 +1648,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1490,6 +1658,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1499,6 +1668,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1508,6 +1678,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1517,6 +1688,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1526,6 +1698,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1535,6 +1708,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1544,6 +1718,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1553,6 +1728,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1562,6 +1738,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1571,6 +1748,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1580,6 +1758,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1589,6 +1768,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1598,6 +1778,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1607,6 +1788,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1616,6 +1798,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1625,6 +1808,7 @@
},
{
"BriefDescription": "Counts any other requests have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1634,6 +1818,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1643,6 +1828,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1652,6 +1838,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1661,6 +1848,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1670,6 +1858,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1679,6 +1868,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1688,6 +1878,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1697,6 +1888,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1706,6 +1898,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1715,6 +1908,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1724,6 +1918,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1733,6 +1928,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1742,6 +1938,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1751,6 +1948,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1760,6 +1958,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1769,6 +1968,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1778,6 +1978,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1787,6 +1988,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1796,6 +1998,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1805,6 +2008,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1814,6 +2018,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1823,6 +2028,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1832,6 +2038,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1841,6 +2048,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1850,6 +2058,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1859,6 +2068,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1868,6 +2078,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1877,6 +2088,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1886,6 +2098,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1895,6 +2108,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1904,6 +2118,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1913,6 +2128,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1922,6 +2138,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1931,6 +2148,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1940,6 +2158,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1949,6 +2168,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1958,6 +2178,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1967,6 +2188,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1976,6 +2198,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1985,6 +2208,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1994,6 +2218,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2003,6 +2228,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2012,6 +2238,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2021,6 +2248,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2030,6 +2258,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2039,6 +2268,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2048,6 +2278,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2057,6 +2288,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2066,6 +2298,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2075,6 +2308,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2084,6 +2318,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2093,6 +2328,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2102,6 +2338,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2111,6 +2348,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2120,6 +2358,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2129,6 +2368,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2138,6 +2378,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2147,6 +2388,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2156,6 +2398,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2165,6 +2408,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2174,6 +2418,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2183,6 +2428,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2192,6 +2438,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2201,6 +2448,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2210,6 +2458,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2219,6 +2468,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2228,6 +2478,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2237,6 +2488,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2246,6 +2498,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2255,6 +2508,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2264,6 +2518,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2273,6 +2528,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2282,6 +2538,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2291,6 +2548,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2300,6 +2558,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2309,6 +2568,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2318,6 +2578,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2327,6 +2588,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2336,6 +2598,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2345,6 +2608,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2354,6 +2618,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2363,6 +2628,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2372,6 +2638,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2381,6 +2648,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2390,6 +2658,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2399,6 +2668,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2408,6 +2678,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2417,6 +2688,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2426,6 +2698,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2435,6 +2708,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2444,6 +2718,7 @@
},
{
"BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"PublicDescription": "This event counts the number of split locks in the super queue.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/counter.json b/tools/perf/pmu-events/arch/x86/broadwell/counter.json
new file mode 100644
index 000000000000..1be6522e2bbc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/counter.json
@@ -0,0 +1,22 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "ARB",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "cbox_0",
+ "CountersNumFixed": 1,
+ "CountersNumGeneric": "0"
+ }
+]
\ No newline at end of file
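
[Editor's note, not part of the patch: the new counter.json describes, per PMU unit, how many fixed and generic counters exist, while each event's "Counter" field lists the generic counters the event may be scheduled on. A minimal, hypothetical cross-check sketch follows — the file paths, the consistency rule, and the script itself are assumptions, not part of the kernel tooling.]

```python
# Hypothetical sanity check: verify that every numeric entry in an event's
# "Counter" field fits within the number of generic counters declared for
# the "core" unit in counter.json. Assumes plain JSON files as in this patch.
import json
import sys


def load(path):
    with open(path) as f:
        return json.load(f)


def check(counter_json_path, event_json_path):
    units = load(counter_json_path)
    core = next(u for u in units if u["Unit"].lower() == "core")
    num_generic = int(core["CountersNumGeneric"])

    ok = True
    for event in load(event_json_path):
        counters = event.get("Counter")
        if counters is None:
            continue  # events without a Counter field are not checked here
        for c in counters.split(","):
            c = c.strip()
            if not c.isdigit():
                continue  # e.g. fixed-counter names are not handled in this sketch
            if int(c) >= num_generic:
                print(f'{event["EventName"]}: counter {c} out of range '
                      f'(core declares {num_generic} generic counters)')
                ok = False
    return ok


if __name__ == "__main__":
    # Example: python check_counters.py broadwell/counter.json broadwell/floating-point.json
    sys.exit(0 if check(sys.argv[1], sys.argv[2]) else 1)
```
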
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
index 986869252e71..9bf595af3f42 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
"SampleAfterValue": "2000006",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* packed double and single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.PACKED",
"SampleAfterValue": "2000004",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation operation. Applies to SSE* and AVX* scalar double and single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -63,6 +71,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -71,6 +80,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -79,6 +89,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
@@ -86,6 +97,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "2000003",
@@ -93,6 +105,7 @@
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -102,6 +115,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
@@ -110,6 +124,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
@@ -118,6 +133,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
@@ -126,6 +142,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
@@ -134,6 +151,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -141,6 +159,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -148,6 +167,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "BDM30",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
@@ -157,6 +177,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "BDM30",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
@@ -166,6 +187,7 @@
},
{
"BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
"PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
index bd5da39564e1..db3488abf9fc 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFDATA_STALL",
"PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -85,6 +95,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
"PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
@@ -101,6 +113,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -109,6 +122,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -118,6 +132,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -126,6 +141,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -135,6 +151,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -144,6 +161,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -154,6 +172,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
@@ -162,6 +181,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
@@ -170,6 +190,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -179,6 +200,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
@@ -195,6 +218,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -204,6 +228,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
index b01ed47072bc..77fbfe99a522 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of times HLE abort was triggered",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
"PEBS": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
"PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
"PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC4",
"PublicDescription": "Number of times HLE caused a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of times HLE commit succeeded",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.COMMIT",
"PublicDescription": "Number of times HLE commit succeeded.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.START",
"PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 128",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 16",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 256",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -113,6 +125,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 32",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -126,6 +139,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 4",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -139,6 +153,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 512",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -152,6 +167,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 64",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -165,6 +181,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 8",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -178,6 +195,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
@@ -186,6 +204,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
@@ -194,6 +213,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -203,6 +223,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -212,6 +233,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -221,6 +243,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -230,6 +253,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -239,6 +263,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -248,6 +273,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -257,6 +283,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -266,6 +293,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -275,6 +303,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -284,6 +313,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -293,6 +323,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -302,6 +333,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -311,6 +343,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -320,6 +353,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -329,6 +363,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -338,6 +373,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -347,6 +383,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -356,6 +393,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -365,6 +403,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -374,6 +413,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -383,6 +423,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -392,6 +433,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -401,6 +443,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -410,6 +453,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -419,6 +463,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -428,6 +473,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -437,6 +483,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -446,6 +493,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -455,6 +503,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -464,6 +513,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -473,6 +523,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -482,6 +533,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -491,6 +543,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -500,6 +553,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -509,6 +563,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -518,6 +573,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -527,6 +583,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -536,6 +593,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -545,6 +603,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -554,6 +613,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -563,6 +623,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -572,6 +633,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -581,6 +643,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -590,6 +653,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -599,6 +663,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -608,6 +673,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -617,6 +683,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -626,6 +693,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -635,6 +703,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -644,6 +713,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -653,6 +723,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -662,6 +733,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -671,6 +743,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -680,6 +753,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -689,6 +763,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -698,6 +773,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -707,6 +783,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -716,6 +793,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -725,6 +803,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -734,6 +813,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -743,6 +823,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -752,6 +833,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -761,6 +843,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -770,6 +853,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -779,6 +863,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -788,6 +873,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -797,6 +883,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -806,6 +893,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -815,6 +903,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -824,6 +913,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -833,6 +923,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -842,6 +933,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -851,6 +943,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -860,6 +953,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -869,6 +963,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -878,6 +973,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -887,6 +983,7 @@
},
{
"BriefDescription": "Counts writebacks (modified to exclusive)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -896,6 +993,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -905,6 +1003,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -914,6 +1013,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -923,6 +1023,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -932,6 +1033,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -941,6 +1043,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -950,6 +1053,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -959,6 +1063,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -968,6 +1073,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -977,6 +1083,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -986,6 +1093,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -995,6 +1103,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1004,6 +1113,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1013,6 +1123,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1022,6 +1133,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1031,6 +1143,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1040,6 +1153,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1049,6 +1163,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1058,6 +1173,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1067,6 +1183,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1076,6 +1193,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1085,6 +1203,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1094,6 +1213,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1103,6 +1223,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1112,6 +1233,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1121,6 +1243,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1130,6 +1253,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1139,6 +1263,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1148,6 +1273,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1157,6 +1283,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1166,6 +1293,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1175,6 +1303,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1184,6 +1313,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1193,6 +1323,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1202,6 +1333,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1211,6 +1343,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1220,6 +1353,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1229,6 +1363,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1238,6 +1373,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1247,6 +1383,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1256,6 +1393,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1265,6 +1403,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1274,6 +1413,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1283,6 +1423,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1292,6 +1433,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1301,6 +1443,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1310,6 +1453,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1319,6 +1463,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1328,6 +1473,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1337,6 +1483,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1346,6 +1493,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1355,6 +1503,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1364,6 +1513,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1373,6 +1523,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1382,6 +1533,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1391,6 +1543,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1400,6 +1553,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1409,6 +1563,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1418,6 +1573,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1427,6 +1583,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1436,6 +1593,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1445,6 +1603,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1454,6 +1613,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1463,6 +1623,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1472,6 +1633,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1481,6 +1643,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1490,6 +1653,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1499,6 +1663,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1508,6 +1673,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1517,6 +1683,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1526,6 +1693,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1535,6 +1703,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1544,6 +1713,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1553,6 +1723,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1562,6 +1733,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1571,6 +1743,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1580,6 +1753,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1589,6 +1763,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1598,6 +1773,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1607,6 +1783,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1616,6 +1793,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1625,6 +1803,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1634,6 +1813,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1643,6 +1823,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1652,6 +1833,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1661,6 +1843,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1670,6 +1853,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1679,6 +1863,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1688,6 +1873,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1697,6 +1883,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1706,6 +1893,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1715,6 +1903,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1724,6 +1913,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1733,6 +1923,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1742,6 +1933,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1751,6 +1943,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1760,6 +1953,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1769,6 +1963,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1778,6 +1973,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1787,6 +1983,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1796,6 +1993,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1805,6 +2003,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1814,6 +2013,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1823,6 +2023,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1832,6 +2033,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1841,6 +2043,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1850,6 +2053,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1859,6 +2063,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1868,6 +2073,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1877,6 +2083,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1886,6 +2093,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1895,6 +2103,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1904,6 +2113,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1913,6 +2123,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1922,6 +2133,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1931,6 +2143,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1940,6 +2153,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1949,6 +2163,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1958,6 +2173,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1967,6 +2183,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1976,6 +2193,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1985,6 +2203,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1994,6 +2213,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2003,6 +2223,7 @@
},
{
"BriefDescription": "Number of times RTM abort was triggered",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "2",
@@ -2012,6 +2233,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
@@ -2020,6 +2242,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC2",
"PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
@@ -2028,6 +2251,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC3",
"PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
@@ -2036,6 +2260,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC4",
"PublicDescription": "Number of times a RTM caused a fault.",
@@ -2044,6 +2269,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
@@ -2052,6 +2278,7 @@
},
{
"BriefDescription": "Number of times RTM commit succeeded",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Number of times RTM commit succeeded.",
@@ -2060,6 +2287,7 @@
},
{
"BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
@@ -2068,6 +2296,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
@@ -2075,6 +2304,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -2083,6 +2313,7 @@
},
{
"BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -2091,6 +2322,7 @@
},
{
"BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC4",
"PublicDescription": "RTM region detected inside HLE.",
@@ -2099,6 +2331,7 @@
},
{
"BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC5",
"SampleAfterValue": "2000003",
@@ -2106,6 +2339,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
@@ -2114,6 +2348,7 @@
},
{
"BriefDescription": "Number of times a TSX line had a cache conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Number of times a TSX line had a cache conflict.",
@@ -2122,6 +2357,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
@@ -2130,6 +2366,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
@@ -2138,6 +2375,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
@@ -2146,6 +2384,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
@@ -2154,6 +2393,7 @@
},
{
"BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"PublicDescription": "Number of times we could not allocate Lock Buffer.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json b/tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json
index 8c808347f6da..4193c90c3459 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/other.json b/tools/perf/pmu-events/arch/x86/broadwell/other.json
index 1c2a5b001949..f0de6a71719b 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 9a902d2160e6..c03f77539362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divider is busy executing divide operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken macro-conditional branch instructions.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"PublicDescription": "This event counts taken speculative and retired direct near calls.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "This event counts all (macro) branch instructions retired.",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "Counter": "0,1,2,3",
"Errata": "BDW98",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -130,6 +146,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "BDW98",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -157,6 +176,7 @@
},
{
"BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
"PEBS": "1",
@@ -166,6 +186,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -175,6 +196,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -184,6 +206,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"PublicDescription": "This event counts not taken branch instructions retired.",
@@ -192,6 +215,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
@@ -200,6 +224,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -208,6 +233,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
@@ -216,6 +242,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -224,6 +251,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -232,6 +260,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
@@ -248,6 +278,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -255,6 +286,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
@@ -263,6 +295,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
@@ -270,6 +303,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -279,6 +313,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -288,6 +323,7 @@
},
{
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -297,6 +333,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -306,6 +343,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -313,6 +351,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
@@ -322,6 +361,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "100003",
@@ -329,6 +369,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -336,6 +377,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -343,6 +385,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
@@ -352,6 +395,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "100003",
@@ -359,6 +403,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -367,12 +412,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -381,12 +428,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -395,6 +444,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -404,6 +454,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -412,6 +463,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
@@ -421,6 +473,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
@@ -430,6 +483,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -438,6 +492,7 @@
},
{
"BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
@@ -447,6 +502,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -455,6 +511,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -464,6 +521,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -472,6 +530,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
@@ -481,6 +540,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
@@ -490,6 +550,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -498,6 +559,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -506,6 +568,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
@@ -514,6 +577,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
@@ -521,6 +585,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"Errata": "BDM61",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
@@ -529,6 +594,7 @@
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"Errata": "BDM11, BDM55",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
@@ -539,6 +605,7 @@
},
{
"BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
@@ -547,6 +614,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RAT_STALL_CYCLES",
"PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
@@ -555,6 +623,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -565,6 +634,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -573,6 +643,7 @@
},
{
"BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"SampleAfterValue": "100003",
@@ -580,6 +651,7 @@
},
{
"BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
@@ -588,6 +660,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
@@ -596,6 +669,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.HW_PF",
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
@@ -604,6 +678,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
@@ -612,6 +687,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -620,6 +696,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -628,6 +705,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
@@ -635,6 +713,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -644,6 +723,7 @@
},
{
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
@@ -652,6 +732,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
@@ -660,6 +741,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -668,6 +750,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -675,6 +758,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -682,6 +766,7 @@
},
{
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
@@ -689,6 +774,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "This event counts resource-related stall cycles.",
@@ -697,6 +783,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -705,6 +792,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -713,6 +801,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -721,6 +810,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
@@ -729,6 +819,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
@@ -737,6 +828,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -747,6 +839,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
@@ -755,6 +848,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
@@ -763,6 +857,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
@@ -771,6 +866,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
@@ -779,6 +875,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
@@ -787,6 +884,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
@@ -795,6 +893,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
@@ -803,6 +902,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
@@ -811,6 +911,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Number of uops executed from any thread.",
@@ -819,6 +920,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -827,6 +929,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -835,6 +938,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -843,6 +947,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -851,6 +956,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"Invert": "1",
@@ -859,6 +965,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
@@ -867,6 +974,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
@@ -875,6 +983,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
@@ -883,6 +992,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
@@ -891,6 +1001,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -901,6 +1012,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.THREAD",
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
@@ -909,6 +1021,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
@@ -918,6 +1031,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
@@ -925,6 +1039,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
@@ -934,6 +1049,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
@@ -941,6 +1057,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
@@ -950,6 +1067,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -957,6 +1075,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
@@ -966,6 +1085,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
@@ -973,6 +1093,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
@@ -982,6 +1103,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
@@ -989,6 +1111,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
@@ -998,6 +1121,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
@@ -1005,6 +1129,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
@@ -1014,6 +1139,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
"SampleAfterValue": "2000003",
@@ -1021,6 +1147,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
@@ -1030,6 +1157,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
"SampleAfterValue": "2000003",
@@ -1037,6 +1165,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
@@ -1045,6 +1174,7 @@
},
{
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.FLAGS_MERGE",
"PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
@@ -1053,6 +1183,7 @@
},
{
"BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SINGLE_MUL",
"SampleAfterValue": "2000003",
@@ -1060,6 +1191,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"SampleAfterValue": "2000003",
@@ -1067,6 +1199,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -1077,6 +1210,7 @@
},
{
"BriefDescription": "Actually retired uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -1086,6 +1220,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -1095,6 +1230,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -1105,6 +1241,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json
index c5cc43825cb9..c4c57febdc72 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
"PerPkg": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
"PerPkg": "1",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
"PerPkg": "1",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
"PerPkg": "1",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in M-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
"PerPkg": "1",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
"PerPkg": "1",
@@ -73,6 +81,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
"PerPkg": "1",
@@ -82,6 +91,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
"PerPkg": "1",
@@ -91,6 +101,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
"PerPkg": "1",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
"PerPkg": "1",
@@ -108,6 +120,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
"PerPkg": "1",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
"PerPkg": "1",
@@ -124,10 +138,20 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
"PerPkg": "1",
"UMask": "0x41",
"Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Unit": "cbox_0"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json
index 64af685274a2..99f8cc992a24 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Each cycle counts number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.;",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.DRD_DIRECT",
"PerPkg": "1",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Number of Core coherent Data Read entries allocated in DirectData mode",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
"PerPkg": "1",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json
deleted file mode 100644
index 58be90d7cc93..000000000000
--- a/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json
+++ /dev/null
@@ -1,10 +0,0 @@
-[
- {
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
- "EventCode": "0xff",
- "EventName": "UNC_CLOCK.SOCKET",
- "PerPkg": "1",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Unit": "CLOCK"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
index 93621e004d88..eb1d9541e26c 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
"SampleAfterValue": "2000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
"SampleAfterValue": "2000003",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
@@ -149,6 +167,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
@@ -157,6 +176,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
@@ -165,6 +185,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
@@ -174,6 +195,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -181,6 +203,7 @@
},
{
"BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
@@ -188,6 +211,7 @@
},
{
"BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
@@ -195,6 +219,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
@@ -203,6 +228,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
@@ -212,6 +238,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
@@ -221,6 +248,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
@@ -230,6 +258,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
@@ -239,6 +268,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L1",
@@ -247,6 +277,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L2",
@@ -255,6 +286,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L3",
@@ -263,6 +295,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
@@ -271,6 +304,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L1",
@@ -279,6 +313,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L2",
@@ -287,6 +322,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L3",
@@ -295,6 +331,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -303,6 +340,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
index 826357787201..2e1380248684 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
@@ -90,7 +90,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
@@ -100,7 +100,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -121,7 +121,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -160,7 +160,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -181,7 +181,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -190,7 +190,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -227,7 +227,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -236,7 +236,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
@@ -246,7 +246,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -283,7 +283,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -292,7 +292,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -320,7 +320,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -340,7 +340,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -380,7 +380,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -420,7 +420,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -494,12 +494,12 @@
"MetricThreshold": "tma_info_inst_mix_ipstore < 8"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -520,7 +520,7 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
@@ -532,7 +532,7 @@
"MetricName": "tma_info_memory_l1mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
@@ -568,7 +568,13 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
@@ -620,7 +626,7 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
@@ -639,13 +645,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -740,7 +746,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -749,7 +755,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -767,7 +773,7 @@
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -787,7 +793,7 @@
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
@@ -829,14 +835,14 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -846,7 +852,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -855,7 +861,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
@@ -884,7 +890,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
@@ -1017,7 +1023,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
"MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
@@ -1026,7 +1032,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -1055,7 +1061,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1083,7 +1089,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -1101,7 +1107,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index 6784331ac1cb..315d7f041731 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L1D miss outstandings duration in cycles",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -35,6 +39,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_DEMAND_RQSTS.WB_HIT",
"PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100003",
@@ -90,6 +101,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "This event counts the total number of L2 code requests.",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"SampleAfterValue": "200003",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
@@ -143,6 +161,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
@@ -150,6 +169,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
@@ -158,6 +178,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -166,6 +187,7 @@
},
{
"BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_HIT",
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
@@ -174,6 +196,7 @@
},
{
"BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_MISS",
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
@@ -182,6 +205,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200003",
@@ -189,6 +213,7 @@
},
{
"BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200003",
@@ -196,6 +221,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
@@ -203,6 +229,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
@@ -210,6 +237,7 @@
},
{
"BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_PF",
"PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
@@ -218,6 +246,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
@@ -226,6 +255,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.CODE_RD",
"PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
@@ -234,6 +264,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
@@ -242,6 +273,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L1D_WB",
"PublicDescription": "This event counts L1D writebacks that access L2 cache.",
@@ -250,6 +282,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_FILL",
"PublicDescription": "This event counts L2 fill requests that access L2 cache.",
@@ -258,6 +291,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "This event counts L2 writebacks that access L2 cache.",
@@ -266,6 +300,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.RFO",
"PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
@@ -274,6 +309,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
@@ -282,6 +318,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
@@ -290,6 +327,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
@@ -298,6 +336,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -309,6 +348,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -320,6 +360,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -331,6 +372,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -342,6 +384,7 @@
},
{
"BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70, BDM100",
"EventCode": "0xD3",
@@ -353,6 +396,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70",
"EventCode": "0xD3",
@@ -363,6 +407,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70",
"EventCode": "0xD3",
@@ -373,6 +418,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70",
"EventCode": "0xD3",
@@ -383,6 +429,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
@@ -393,6 +440,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
@@ -403,6 +451,7 @@
},
{
"BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
@@ -413,6 +462,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM35",
"EventCode": "0xD1",
@@ -424,6 +474,7 @@
},
{
"BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
@@ -434,6 +485,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD1",
@@ -445,6 +497,7 @@
},
{
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100, BDE70",
"EventCode": "0xD1",
@@ -455,6 +508,7 @@
},
{
"BriefDescription": "Retired load uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -465,6 +519,7 @@
},
{
"BriefDescription": "Retired store uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -475,6 +530,7 @@
},
{
"BriefDescription": "Retired load uops with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM35",
"EventCode": "0xD0",
@@ -486,6 +542,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -496,6 +553,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
@@ -506,6 +564,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
@@ -516,6 +575,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
@@ -526,6 +586,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -534,6 +595,7 @@
},
{
"BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
@@ -542,6 +604,7 @@
},
{
"BriefDescription": "Cacheable and non-cacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "This event counts both cacheable and non-cacheable code read requests.",
@@ -550,6 +613,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -558,6 +622,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -566,6 +631,7 @@
},
{
"BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
@@ -574,6 +640,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
@@ -583,6 +650,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -593,6 +661,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -603,6 +672,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -613,6 +683,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
@@ -622,6 +693,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
@@ -631,6 +703,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -640,6 +713,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
@@ -649,6 +723,7 @@
},
{
"BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
@@ -656,6 +731,7 @@
},
{
"BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"PublicDescription": "This event counts the number of split locks in the super queue.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/counter.json b/tools/perf/pmu-events/arch/x86/broadwellde/counter.json
new file mode 100644
index 000000000000..ada968d0a038
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/counter.json
@@ -0,0 +1,42 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "HA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "R2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
index 986869252e71..9bf595af3f42 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
"SampleAfterValue": "2000006",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* packed double and single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.PACKED",
"SampleAfterValue": "2000004",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation operation. Applies to SSE* and AVX* scalar double and single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -63,6 +71,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -71,6 +80,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -79,6 +89,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
@@ -86,6 +97,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "2000003",
@@ -93,6 +105,7 @@
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -102,6 +115,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
@@ -110,6 +124,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
@@ -118,6 +133,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
@@ -126,6 +142,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
@@ -134,6 +151,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -141,6 +159,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -148,6 +167,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "BDM30",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
@@ -157,6 +177,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "BDM30",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
@@ -166,6 +187,7 @@
},
{
"BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
"PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
index bd5da39564e1..db3488abf9fc 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFDATA_STALL",
"PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -85,6 +95,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
"PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
@@ -101,6 +113,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -109,6 +122,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -118,6 +132,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -126,6 +141,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -135,6 +151,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -144,6 +161,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -154,6 +172,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
@@ -162,6 +181,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
@@ -170,6 +190,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -179,6 +200,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
@@ -195,6 +218,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -204,6 +228,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
index 041b6ff4062e..31a74eed2f7d 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of times HLE abort was triggered",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
"PEBS": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
"PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
"PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC4",
"PublicDescription": "Number of times HLE caused a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of times HLE commit succeeded",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.COMMIT",
"PublicDescription": "Number of times HLE commit succeeded.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.START",
"PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 128",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 16",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 256",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -113,6 +125,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 32",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -126,6 +139,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 4",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -139,6 +153,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 512",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -152,6 +167,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 64",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -165,6 +181,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 8",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -178,6 +195,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
@@ -186,6 +204,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
@@ -194,6 +213,7 @@
},
{
"BriefDescription": "Number of times RTM abort was triggered",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "1",
@@ -203,6 +223,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
@@ -211,6 +232,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC2",
"PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
@@ -219,6 +241,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC3",
"PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
@@ -227,6 +250,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC4",
"PublicDescription": "Number of times a RTM caused a fault.",
@@ -235,6 +259,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
@@ -243,6 +268,7 @@
},
{
"BriefDescription": "Number of times RTM commit succeeded",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Number of times RTM commit succeeded.",
@@ -251,6 +277,7 @@
},
{
"BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
@@ -259,6 +286,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
@@ -266,6 +294,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -274,6 +303,7 @@
},
{
"BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -282,6 +312,7 @@
},
{
"BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC4",
"PublicDescription": "RTM region detected inside HLE.",
@@ -290,6 +321,7 @@
},
{
"BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC5",
"SampleAfterValue": "2000003",
@@ -297,6 +329,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
@@ -305,6 +338,7 @@
},
{
"BriefDescription": "Number of times a TSX line had a cache conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Number of times a TSX line had a cache conflict.",
@@ -313,6 +347,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
@@ -321,6 +356,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
@@ -329,6 +365,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
@@ -337,6 +374,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
@@ -345,6 +383,7 @@
},
{
"BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"PublicDescription": "Number of times we could not allocate Lock Buffer.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json b/tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json
index 8c808347f6da..4193c90c3459 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/other.json b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
index 1c2a5b001949..f0de6a71719b 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 9a902d2160e6..c03f77539362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divider is busy executing divide operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken macro-conditional branch instructions.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"PublicDescription": "This event counts taken speculative and retired direct near calls.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "This event counts all (macro) branch instructions retired.",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "Counter": "0,1,2,3",
"Errata": "BDW98",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -130,6 +146,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "BDW98",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -157,6 +176,7 @@
},
{
"BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
"PEBS": "1",
@@ -166,6 +186,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -175,6 +196,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -184,6 +206,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"PublicDescription": "This event counts not taken branch instructions retired.",
@@ -192,6 +215,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
@@ -200,6 +224,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -208,6 +233,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
@@ -216,6 +242,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -224,6 +251,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -232,6 +260,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
@@ -248,6 +278,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -255,6 +286,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
@@ -263,6 +295,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
@@ -270,6 +303,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -279,6 +313,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -288,6 +323,7 @@
},
{
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -297,6 +333,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -306,6 +343,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -313,6 +351,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
@@ -322,6 +361,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "100003",
@@ -329,6 +369,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -336,6 +377,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -343,6 +385,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
@@ -352,6 +395,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "100003",
@@ -359,6 +403,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -367,12 +412,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -381,12 +428,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -395,6 +444,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -404,6 +454,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -412,6 +463,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
@@ -421,6 +473,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
@@ -430,6 +483,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -438,6 +492,7 @@
},
{
"BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
@@ -447,6 +502,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -455,6 +511,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -464,6 +521,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -472,6 +530,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
@@ -481,6 +540,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
@@ -490,6 +550,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -498,6 +559,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -506,6 +568,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
@@ -514,6 +577,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
@@ -521,6 +585,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"Errata": "BDM61",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
@@ -529,6 +594,7 @@
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"Errata": "BDM11, BDM55",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
@@ -539,6 +605,7 @@
},
{
"BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
@@ -547,6 +614,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RAT_STALL_CYCLES",
"PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
@@ -555,6 +623,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -565,6 +634,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -573,6 +643,7 @@
},
{
"BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"SampleAfterValue": "100003",
@@ -580,6 +651,7 @@
},
{
"BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
@@ -588,6 +660,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
@@ -596,6 +669,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.HW_PF",
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
@@ -604,6 +678,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
@@ -612,6 +687,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -620,6 +696,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -628,6 +705,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
@@ -635,6 +713,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -644,6 +723,7 @@
},
{
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
@@ -652,6 +732,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
@@ -660,6 +741,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -668,6 +750,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -675,6 +758,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -682,6 +766,7 @@
},
{
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
@@ -689,6 +774,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "This event counts resource-related stall cycles.",
@@ -697,6 +783,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -705,6 +792,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -713,6 +801,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -721,6 +810,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
@@ -729,6 +819,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
@@ -737,6 +828,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -747,6 +839,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
@@ -755,6 +848,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
@@ -763,6 +857,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
@@ -771,6 +866,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
@@ -779,6 +875,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
@@ -787,6 +884,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
@@ -795,6 +893,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
@@ -803,6 +902,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
@@ -811,6 +911,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Number of uops executed from any thread.",
@@ -819,6 +920,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -827,6 +929,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -835,6 +938,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -843,6 +947,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -851,6 +956,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"Invert": "1",
@@ -859,6 +965,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
@@ -867,6 +974,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
@@ -875,6 +983,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
@@ -883,6 +992,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
@@ -891,6 +1001,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -901,6 +1012,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.THREAD",
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
@@ -909,6 +1021,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
@@ -918,6 +1031,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
@@ -925,6 +1039,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
@@ -934,6 +1049,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
@@ -941,6 +1057,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
@@ -950,6 +1067,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -957,6 +1075,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
@@ -966,6 +1085,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
@@ -973,6 +1093,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
@@ -982,6 +1103,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
@@ -989,6 +1111,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
@@ -998,6 +1121,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
@@ -1005,6 +1129,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
@@ -1014,6 +1139,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
"SampleAfterValue": "2000003",
@@ -1021,6 +1147,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
@@ -1030,6 +1157,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
"SampleAfterValue": "2000003",
@@ -1037,6 +1165,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
@@ -1045,6 +1174,7 @@
},
{
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.FLAGS_MERGE",
"PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
@@ -1053,6 +1183,7 @@
},
{
"BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SINGLE_MUL",
"SampleAfterValue": "2000003",
@@ -1060,6 +1191,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"SampleAfterValue": "2000003",
@@ -1067,6 +1199,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -1077,6 +1210,7 @@
},
{
"BriefDescription": "Actually retired uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -1086,6 +1220,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -1095,6 +1230,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -1105,6 +1241,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
index 56bba6d4e0f6..f5b5ae1150c3 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_C_BOUNCE_CONTROL",
"PerPkg": "1",
@@ -8,12 +9,14 @@
},
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBOX"
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_C_COUNTER0_OCCUPANCY",
"PerPkg": "1",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "FaST wire asserted",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_C_FAST_ASSERTED",
"PerPkg": "1",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Cache Lookups; Any Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
"PerPkg": "1",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.NID",
"PerPkg": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Cache Lookups; Any Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.READ",
"PerPkg": "1",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
"PerPkg": "1",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.WRITE",
"PerPkg": "1",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.E_STATE",
"PerPkg": "1",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.F_STATE",
"PerPkg": "1",
@@ -102,6 +114,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.I_STATE",
"PerPkg": "1",
@@ -111,6 +124,7 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.MISS",
"PerPkg": "1",
@@ -120,6 +134,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
@@ -129,6 +144,7 @@
},
{
"BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.NID",
"PerPkg": "1",
@@ -138,6 +154,7 @@
},
{
"BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
"PerPkg": "1",
@@ -147,6 +164,7 @@
},
{
"BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
"PerPkg": "1",
@@ -156,6 +174,7 @@
},
{
"BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -165,6 +184,7 @@
},
{
"BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RSPI_WAS_FSE",
"PerPkg": "1",
@@ -174,6 +194,7 @@
},
{
"BriefDescription": "Cbo Misc",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.STARTED",
"PerPkg": "1",
@@ -183,6 +204,7 @@
},
{
"BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.WC_ALIASING",
"PerPkg": "1",
@@ -192,6 +214,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE0",
"PerPkg": "1",
@@ -201,6 +224,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE1",
"PerPkg": "1",
@@ -210,6 +234,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE2",
"PerPkg": "1",
@@ -219,6 +244,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE3",
"PerPkg": "1",
@@ -228,6 +254,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.LRU_DECREMENT",
"PerPkg": "1",
@@ -237,6 +264,7 @@
},
{
"BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
"PerPkg": "1",
@@ -246,6 +274,7 @@
},
{
"BriefDescription": "AD Ring In Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.ALL",
"PerPkg": "1",
@@ -255,6 +284,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -264,6 +294,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.CW",
"PerPkg": "1",
@@ -273,6 +304,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -282,6 +314,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
"PerPkg": "1",
@@ -291,6 +324,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP_EVEN",
"PerPkg": "1",
@@ -300,6 +334,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP_ODD",
"PerPkg": "1",
@@ -309,6 +344,7 @@
},
{
"BriefDescription": "AK Ring In Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -318,6 +354,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -327,6 +364,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.CW",
"PerPkg": "1",
@@ -336,6 +374,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -345,6 +384,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
"PerPkg": "1",
@@ -354,6 +394,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP_EVEN",
"PerPkg": "1",
@@ -363,6 +404,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP_ODD",
"PerPkg": "1",
@@ -372,6 +414,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -381,6 +424,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -390,6 +434,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.CW",
"PerPkg": "1",
@@ -399,6 +444,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -408,6 +454,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
"PerPkg": "1",
@@ -417,6 +464,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP_EVEN",
"PerPkg": "1",
@@ -426,6 +474,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP_ODD",
"PerPkg": "1",
@@ -435,6 +484,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AD",
"PerPkg": "1",
@@ -443,6 +493,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AK",
"PerPkg": "1",
@@ -451,6 +502,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.BL",
"PerPkg": "1",
@@ -459,6 +511,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.IV",
"PerPkg": "1",
@@ -467,6 +520,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -476,6 +530,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.DN",
"PerPkg": "1",
@@ -485,6 +540,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.DOWN",
"PerPkg": "1",
@@ -494,6 +550,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.UP",
"PerPkg": "1",
@@ -503,6 +560,7 @@
},
{
"BriefDescription": "AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AD",
"PerPkg": "1",
@@ -511,6 +569,7 @@
},
{
"BriefDescription": "AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AK",
"PerPkg": "1",
@@ -519,6 +578,7 @@
},
{
"BriefDescription": "BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.BL",
"PerPkg": "1",
@@ -527,6 +587,7 @@
},
{
"BriefDescription": "IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.IV",
"PerPkg": "1",
@@ -535,6 +596,7 @@
},
{
"BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_C_RING_SRC_THRTL",
"PerPkg": "1",
@@ -542,6 +604,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
"PerPkg": "1",
@@ -551,6 +614,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
"PerPkg": "1",
@@ -560,6 +624,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
"PerPkg": "1",
@@ -569,6 +634,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
"PerPkg": "1",
@@ -578,6 +644,7 @@
},
{
"BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IPQ",
"PerPkg": "1",
@@ -587,6 +654,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ",
"PerPkg": "1",
@@ -596,6 +664,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
"PerPkg": "1",
@@ -605,6 +674,7 @@
},
{
"BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.PRQ",
"PerPkg": "1",
@@ -614,6 +684,7 @@
},
{
"BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
"PerPkg": "1",
@@ -623,6 +694,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IPQ",
"PerPkg": "1",
@@ -632,6 +704,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IRQ",
"PerPkg": "1",
@@ -641,6 +714,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
"PerPkg": "1",
@@ -650,6 +724,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.PRQ",
"PerPkg": "1",
@@ -659,6 +734,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -668,6 +744,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
"PerPkg": "1",
@@ -677,6 +754,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
"PerPkg": "1",
@@ -686,6 +764,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -695,6 +774,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -704,6 +784,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -713,6 +794,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -722,6 +804,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
"PerPkg": "1",
@@ -731,6 +814,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
"PerPkg": "1",
@@ -740,6 +824,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -749,6 +834,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.NID",
"PerPkg": "1",
@@ -758,6 +844,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -767,6 +854,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
"PerPkg": "1",
@@ -776,6 +864,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -785,6 +874,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
"PerPkg": "1",
@@ -794,6 +884,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -803,6 +894,7 @@
},
{
"BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
"PerPkg": "1",
@@ -812,6 +904,7 @@
},
{
"BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
"PerPkg": "1",
@@ -821,6 +914,7 @@
},
{
"BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -830,6 +924,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
"PerPkg": "1",
@@ -839,6 +934,7 @@
},
{
"BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -848,6 +944,7 @@
},
{
"BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
"PerPkg": "1",
@@ -857,6 +954,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
"PerPkg": "1",
@@ -866,6 +964,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -875,6 +974,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
"PerPkg": "1",
@@ -884,6 +984,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -893,6 +994,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
"PerPkg": "1",
@@ -902,6 +1004,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -911,6 +1014,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
"PerPkg": "1",
@@ -920,6 +1024,7 @@
},
{
"BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
"PerPkg": "1",
@@ -929,6 +1034,7 @@
},
{
"BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -938,6 +1044,7 @@
},
{
"BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -947,6 +1054,7 @@
},
{
"BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x3E",
"EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -956,6 +1064,7 @@
},
{
"BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x3E",
"EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -965,6 +1074,7 @@
},
{
"BriefDescription": "TOR Inserts; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
@@ -974,6 +1084,7 @@
},
{
"BriefDescription": "TOR Inserts; Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
@@ -983,6 +1094,7 @@
},
{
"BriefDescription": "TOR Inserts; Local Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOCAL",
"PerPkg": "1",
@@ -992,6 +1104,7 @@
},
{
"BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
"PerPkg": "1",
@@ -1001,6 +1114,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Local Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
"PerPkg": "1",
@@ -1010,6 +1124,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
"PerPkg": "1",
@@ -1019,6 +1134,7 @@
},
{
"BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
@@ -1028,6 +1144,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Remote Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
"PerPkg": "1",
@@ -1037,6 +1154,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
"PerPkg": "1",
@@ -1046,6 +1164,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
@@ -1055,6 +1174,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
@@ -1064,6 +1184,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Miss All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
@@ -1073,6 +1194,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -1082,6 +1204,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
@@ -1091,6 +1214,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
@@ -1100,6 +1224,7 @@
},
{
"BriefDescription": "TOR Inserts; Opcode Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
@@ -1109,6 +1234,7 @@
},
{
"BriefDescription": "TOR Inserts; Remote Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REMOTE",
"PerPkg": "1",
@@ -1118,6 +1244,7 @@
},
{
"BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
"PerPkg": "1",
@@ -1127,6 +1254,7 @@
},
{
"BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WB",
"PerPkg": "1",
@@ -1136,6 +1264,7 @@
},
{
"BriefDescription": "TOR Occupancy; Any",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -1145,6 +1274,7 @@
},
{
"BriefDescription": "TOR Occupancy; Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
"PerPkg": "1",
@@ -1154,6 +1284,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -1163,6 +1294,7 @@
},
{
"BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
"PerPkg": "1",
@@ -1172,6 +1304,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss All",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
"PerPkg": "1",
@@ -1181,6 +1314,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
"PerPkg": "1",
@@ -1190,6 +1324,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
"PerPkg": "1",
@@ -1199,6 +1334,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
@@ -1208,6 +1344,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
"PerPkg": "1",
@@ -1217,6 +1354,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
"PerPkg": "1",
@@ -1226,6 +1364,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
"PerPkg": "1",
@@ -1235,6 +1374,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
"PerPkg": "1",
@@ -1244,6 +1384,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
"PerPkg": "1",
@@ -1253,6 +1394,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -1262,6 +1404,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
"PerPkg": "1",
@@ -1271,6 +1414,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
"PerPkg": "1",
@@ -1280,6 +1424,7 @@
},
{
"BriefDescription": "TOR Occupancy; Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
"PerPkg": "1",
@@ -1289,6 +1434,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -1298,6 +1444,7 @@
},
{
"BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
"PerPkg": "1",
@@ -1307,6 +1454,7 @@
},
{
"BriefDescription": "TOR Occupancy; Writebacks",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.WB",
"PerPkg": "1",
@@ -1316,6 +1464,7 @@
},
{
"BriefDescription": "Onto AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.AD",
"PerPkg": "1",
@@ -1324,6 +1473,7 @@
},
{
"BriefDescription": "Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.AK",
"PerPkg": "1",
@@ -1332,6 +1482,7 @@
},
{
"BriefDescription": "Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.BL",
"PerPkg": "1",
@@ -1340,6 +1491,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
"PerPkg": "1",
@@ -1349,6 +1501,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CORE",
"PerPkg": "1",
@@ -1358,6 +1511,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
"PerPkg": "1",
@@ -1367,6 +1521,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CORE",
"PerPkg": "1",
@@ -1376,6 +1531,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
"PerPkg": "1",
@@ -1385,6 +1541,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CORE",
"PerPkg": "1",
@@ -1394,6 +1551,7 @@
},
{
"BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
"PerPkg": "1",
@@ -1403,6 +1561,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AD_CORE",
"PerPkg": "1",
@@ -1412,6 +1571,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AK_BOTH",
"PerPkg": "1",
@@ -1421,6 +1581,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.BL_BOTH",
"PerPkg": "1",
@@ -1430,6 +1591,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.IV",
"PerPkg": "1",
@@ -1439,6 +1601,7 @@
},
{
"BriefDescription": "BT Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_H_BT_CYCLES_NE",
"PerPkg": "1",
@@ -1447,6 +1610,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
"PerPkg": "1",
@@ -1456,6 +1620,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
"PerPkg": "1",
@@ -1465,6 +1630,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
"PerPkg": "1",
@@ -1474,6 +1640,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
"PerPkg": "1",
@@ -1483,6 +1650,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
"PerPkg": "1",
@@ -1492,6 +1660,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.TAKEN",
"PerPkg": "1",
@@ -1501,6 +1670,7 @@
},
{
"BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_H_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
@@ -1508,6 +1678,7 @@
},
{
"BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_H_DIRECT2CORE_COUNT",
"PerPkg": "1",
@@ -1516,6 +1687,7 @@
},
{
"BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
"PerPkg": "1",
@@ -1524,6 +1696,7 @@
},
{
"BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
"PerPkg": "1",
@@ -1532,6 +1705,7 @@
},
{
"BriefDescription": "Directory Lat Opt Return",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_H_DIRECTORY_LAT_OPT",
"PerPkg": "1",
@@ -1540,6 +1714,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
"PerPkg": "1",
@@ -1549,6 +1724,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
"PerPkg": "1",
@@ -1558,6 +1734,7 @@
},
{
"BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -1567,6 +1744,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
"PerPkg": "1",
@@ -1576,6 +1754,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.SET",
"PerPkg": "1",
@@ -1585,6 +1764,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1593,6 +1773,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ALL",
"PerPkg": "1",
@@ -1601,6 +1782,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ALLOCS",
"PerPkg": "1",
@@ -1609,6 +1791,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.EVICTS",
"PerPkg": "1",
@@ -1617,6 +1800,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.HOM",
"PerPkg": "1",
@@ -1625,6 +1809,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.INVALS",
"PerPkg": "1",
@@ -1633,6 +1818,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
"PerPkg": "1",
@@ -1641,6 +1827,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSP",
"PerPkg": "1",
@@ -1649,6 +1836,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -1657,6 +1845,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -1665,6 +1854,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDS",
"PerPkg": "1",
@@ -1673,6 +1863,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
"PerPkg": "1",
@@ -1681,6 +1872,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.WBMTOI",
"PerPkg": "1",
@@ -1689,6 +1881,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1697,6 +1890,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
"PerPkg": "1",
@@ -1705,6 +1899,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
"PerPkg": "1",
@@ -1713,6 +1908,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
"PerPkg": "1",
@@ -1721,6 +1917,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
"PerPkg": "1",
@@ -1729,6 +1926,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -1737,6 +1935,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -1745,6 +1944,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
"PerPkg": "1",
@@ -1753,6 +1953,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
"PerPkg": "1",
@@ -1761,6 +1962,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
"PerPkg": "1",
@@ -1769,6 +1971,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1777,6 +1980,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ALL",
"PerPkg": "1",
@@ -1785,6 +1989,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
"PerPkg": "1",
@@ -1793,6 +1998,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.HOM",
"PerPkg": "1",
@@ -1801,6 +2007,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.INVALS",
"PerPkg": "1",
@@ -1809,6 +2016,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
"PerPkg": "1",
@@ -1817,6 +2025,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSP",
"PerPkg": "1",
@@ -1825,6 +2034,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -1833,6 +2043,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -1841,6 +2052,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
"PerPkg": "1",
@@ -1849,6 +2061,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
"PerPkg": "1",
@@ -1857,6 +2070,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
"PerPkg": "1",
@@ -1865,6 +2079,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
"PerPkg": "1",
@@ -1874,6 +2089,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
"PerPkg": "1",
@@ -1883,6 +2099,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
"PerPkg": "1",
@@ -1892,6 +2109,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
"PerPkg": "1",
@@ -1901,6 +2119,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
"PerPkg": "1",
@@ -1910,6 +2129,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
"PerPkg": "1",
@@ -1919,6 +2139,7 @@
},
{
"BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_H_IMC_READS.NORMAL",
"PerPkg": "1",
@@ -1928,6 +2149,7 @@
},
{
"BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_H_IMC_RETRY",
"PerPkg": "1",
@@ -1935,6 +2157,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -1944,6 +2167,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.FULL",
"PerPkg": "1",
@@ -1953,6 +2177,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
"PerPkg": "1",
@@ -1962,6 +2187,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -1971,6 +2197,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
"PerPkg": "1",
@@ -1980,6 +2207,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
"PerPkg": "1",
@@ -1988,6 +2216,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
"PerPkg": "1",
@@ -1996,6 +2225,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x64",
"EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
"PerPkg": "1",
@@ -2005,6 +2235,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x64",
"EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
"PerPkg": "1",
@@ -2014,6 +2245,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0x65",
"EventName": "UNC_H_IOT_CTS_HI.CTS2",
"PerPkg": "1",
@@ -2023,6 +2255,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0x65",
"EventName": "UNC_H_IOT_CTS_HI.CTS3",
"PerPkg": "1",
@@ -2032,6 +2265,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
"PerPkg": "1",
@@ -2041,6 +2275,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
"PerPkg": "1",
@@ -2050,6 +2285,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.CANCELLED",
"PerPkg": "1",
@@ -2059,6 +2295,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2068,6 +2305,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.READS_LOCAL",
"PerPkg": "1",
@@ -2077,6 +2315,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
"PerPkg": "1",
@@ -2086,6 +2325,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.REMOTE",
"PerPkg": "1",
@@ -2095,6 +2335,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.REMOTE_USEFUL",
"PerPkg": "1",
@@ -2104,6 +2345,7 @@
},
{
"BriefDescription": "OSB Early Data Return; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.ALL",
"PerPkg": "1",
@@ -2113,6 +2355,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
"PerPkg": "1",
@@ -2122,6 +2365,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
"PerPkg": "1",
@@ -2131,6 +2375,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
"PerPkg": "1",
@@ -2140,6 +2385,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
"PerPkg": "1",
@@ -2149,6 +2395,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2158,6 +2405,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -2167,6 +2415,7 @@
},
{
"BriefDescription": "Read and Write Requests; Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
@@ -2176,6 +2425,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -2185,6 +2435,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -2194,6 +2445,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
@@ -2203,6 +2455,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -2212,6 +2465,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -2221,6 +2475,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -2230,6 +2485,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2239,6 +2495,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -2248,6 +2505,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW",
"PerPkg": "1",
@@ -2257,6 +2515,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -2266,6 +2525,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -2275,6 +2535,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -2284,6 +2545,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -2293,6 +2555,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2302,6 +2565,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -2311,6 +2575,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW",
"PerPkg": "1",
@@ -2320,6 +2585,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -2329,6 +2595,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -2338,6 +2605,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -2347,6 +2615,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -2356,6 +2625,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2365,6 +2635,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -2374,6 +2645,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW",
"PerPkg": "1",
@@ -2383,6 +2655,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -2392,6 +2665,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -2401,6 +2675,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -2410,6 +2685,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -2419,6 +2695,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -2428,6 +2705,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -2437,6 +2715,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -2446,6 +2725,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -2455,6 +2735,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -2464,6 +2745,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
@@ -2473,6 +2755,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2482,6 +2765,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2491,6 +2775,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2500,6 +2785,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2509,6 +2795,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2518,6 +2805,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2527,6 +2815,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2536,6 +2825,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2545,6 +2835,7 @@
},
{
"BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
"PerPkg": "1",
@@ -2554,6 +2845,7 @@
},
{
"BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
"PerPkg": "1",
@@ -2563,6 +2855,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -2572,6 +2865,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
"PerPkg": "1",
@@ -2581,6 +2875,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
"PerPkg": "1",
@@ -2590,6 +2885,7 @@
},
{
"BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -2599,6 +2895,7 @@
},
{
"BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -2608,6 +2905,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
"PerPkg": "1",
@@ -2617,6 +2915,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
@@ -2626,6 +2925,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
@@ -2635,6 +2935,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
@@ -2644,6 +2945,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
@@ -2653,6 +2955,7 @@
},
{
"BriefDescription": "Snoop Responses Received; Rsp*Fwd*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
@@ -2662,6 +2965,7 @@
},
{
"BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSP_WB",
"PerPkg": "1",
@@ -2671,6 +2975,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Other",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
"PerPkg": "1",
@@ -2680,6 +2985,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
"PerPkg": "1",
@@ -2689,6 +2995,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
"PerPkg": "1",
@@ -2698,6 +3005,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
"PerPkg": "1",
@@ -2707,6 +3015,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
"PerPkg": "1",
@@ -2716,6 +3025,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
"PerPkg": "1",
@@ -2725,6 +3035,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
"PerPkg": "1",
@@ -2734,6 +3045,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
"PerPkg": "1",
@@ -2743,6 +3055,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
@@ -2752,6 +3065,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
@@ -2761,6 +3075,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
@@ -2770,6 +3085,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
@@ -2779,6 +3095,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
"PerPkg": "1",
@@ -2788,6 +3105,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
"PerPkg": "1",
@@ -2797,6 +3115,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
"PerPkg": "1",
@@ -2806,6 +3125,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
"PerPkg": "1",
@@ -2815,6 +3135,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
"PerPkg": "1",
@@ -2824,6 +3145,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
"PerPkg": "1",
@@ -2833,6 +3155,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
"PerPkg": "1",
@@ -2842,6 +3165,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
"PerPkg": "1",
@@ -2851,6 +3175,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
"PerPkg": "1",
@@ -2860,6 +3185,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
"PerPkg": "1",
@@ -2869,6 +3195,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
"PerPkg": "1",
@@ -2878,6 +3205,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
"PerPkg": "1",
@@ -2887,6 +3215,7 @@
},
{
"BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -2896,6 +3225,7 @@
},
{
"BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
"PerPkg": "1",
@@ -2905,6 +3235,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -2914,6 +3245,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
"PerPkg": "1",
@@ -2923,6 +3255,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
"PerPkg": "1",
@@ -2932,6 +3265,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local InvItoE Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2941,6 +3275,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote InvItoE Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
"PerPkg": "1",
@@ -2950,6 +3285,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local Read Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
"PerPkg": "1",
@@ -2959,6 +3295,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote Read Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
"PerPkg": "1",
@@ -2968,6 +3305,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
"PerPkg": "1",
@@ -2977,6 +3315,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
"PerPkg": "1",
@@ -2986,6 +3325,7 @@
},
{
"BriefDescription": "Data Pending Occupancy Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -2995,6 +3335,7 @@
},
{
"BriefDescription": "Data Pending Occupancy Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -3004,6 +3345,7 @@
},
{
"BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_H_TxR_AD.HOM",
"PerPkg": "1",
@@ -3013,6 +3355,7 @@
},
{
"BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3022,6 +3365,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3031,6 +3375,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3040,6 +3385,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3049,6 +3395,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3058,6 +3405,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3067,6 +3415,7 @@
},
{
"BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.ALL",
"PerPkg": "1",
@@ -3076,6 +3425,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3085,6 +3435,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3094,6 +3445,7 @@
},
{
"BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3103,6 +3455,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3112,6 +3465,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3121,6 +3475,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3130,6 +3485,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3139,6 +3495,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3148,6 +3505,7 @@
},
{
"BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.ALL",
"PerPkg": "1",
@@ -3157,6 +3515,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3166,6 +3525,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3175,6 +3535,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CACHE",
"PerPkg": "1",
@@ -3184,6 +3545,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CORE",
"PerPkg": "1",
@@ -3193,6 +3555,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_QPI",
"PerPkg": "1",
@@ -3202,6 +3565,7 @@
},
{
"BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3211,6 +3575,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3220,6 +3585,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3229,6 +3595,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3238,6 +3605,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3247,6 +3615,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3256,6 +3625,7 @@
},
{
"BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.ALL",
"PerPkg": "1",
@@ -3265,6 +3635,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3274,6 +3645,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3283,6 +3655,7 @@
},
{
"BriefDescription": "Injection Starvation; For AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_H_TxR_STARVED.AK",
"PerPkg": "1",
@@ -3292,6 +3665,7 @@
},
{
"BriefDescription": "Injection Starvation; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_H_TxR_STARVED.BL",
"PerPkg": "1",
@@ -3301,6 +3675,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -3310,6 +3685,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -3319,6 +3695,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -3328,6 +3705,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -3337,6 +3715,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -3346,6 +3725,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -3355,6 +3735,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -3364,6 +3745,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json
index 910395977a6e..58031f397168 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Number of clocks in the IRP.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
"PerPkg": "1",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.CRD",
"PerPkg": "1",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.DRD",
"PerPkg": "1",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
"PerPkg": "1",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
"PerPkg": "1",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIItoM",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCITOM",
"PerPkg": "1",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Coherent Ops; RFO",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.RFO",
"PerPkg": "1",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Coherent Ops; WbMtoI",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
"PerPkg": "1",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
"PerPkg": "1",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
"PerPkg": "1",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
"PerPkg": "1",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_REJ",
"PerPkg": "1",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_REQ",
"PerPkg": "1",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_XFER",
"PerPkg": "1",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
"PerPkg": "1",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.PF_TIMEOUT",
"PerPkg": "1",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.DATA_THROTTLE",
"PerPkg": "1",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "Misc Events - Set 1",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.LOST_FWD",
"PerPkg": "1",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
"PerPkg": "1",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
"PerPkg": "1",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_E",
"PerPkg": "1",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_I",
"PerPkg": "1",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_M",
"PerPkg": "1",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_S",
"PerPkg": "1",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
"EventCode": "0xA",
"EventName": "UNC_I_RxR_AK_INSERTS",
"PerPkg": "1",
@@ -250,6 +278,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
@@ -258,6 +287,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
@@ -266,6 +296,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
@@ -274,6 +305,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
@@ -282,6 +314,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
@@ -290,6 +323,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
@@ -298,6 +332,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
@@ -306,6 +341,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
@@ -314,6 +350,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
@@ -322,6 +359,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
"PerPkg": "1",
@@ -331,6 +369,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
"PerPkg": "1",
@@ -340,6 +379,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
"PerPkg": "1",
@@ -349,6 +389,7 @@
},
{
"BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.MISS",
"PerPkg": "1",
@@ -358,6 +399,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
"PerPkg": "1",
@@ -367,6 +409,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
"PerPkg": "1",
@@ -376,6 +419,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
"PerPkg": "1",
@@ -385,6 +429,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.ATOMIC",
"PerPkg": "1",
@@ -394,6 +439,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.OTHER",
"PerPkg": "1",
@@ -403,6 +449,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.RD_PREF",
"PerPkg": "1",
@@ -412,6 +459,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.READS",
"PerPkg": "1",
@@ -421,6 +469,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
"PerPkg": "1",
@@ -430,6 +479,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -439,6 +489,7 @@
},
{
"BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -447,6 +498,7 @@
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -455,6 +507,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xE",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
"PerPkg": "1",
@@ -463,6 +516,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
"PerPkg": "1",
@@ -471,6 +525,7 @@
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0xD",
"EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
"PerPkg": "1",
@@ -479,6 +534,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
"PerPkg": "1",
@@ -488,6 +544,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.DISABLE",
"PerPkg": "1",
@@ -497,6 +554,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.ENABLE",
"PerPkg": "1",
@@ -506,6 +564,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
"PerPkg": "1",
@@ -515,6 +574,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
"PerPkg": "1",
@@ -524,6 +584,7 @@
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
"PerPkg": "1",
@@ -533,6 +594,7 @@
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
"PerPkg": "1",
@@ -541,6 +603,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.CMC",
"PerPkg": "1",
@@ -550,6 +613,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
"PerPkg": "1",
@@ -559,6 +623,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LTERROR",
"PerPkg": "1",
@@ -568,6 +633,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
"PerPkg": "1",
@@ -577,6 +643,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
"PerPkg": "1",
@@ -586,6 +653,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.OTHER",
"PerPkg": "1",
@@ -595,6 +663,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.TRAP",
"PerPkg": "1",
@@ -604,6 +673,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.UMC",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json
index 01e04daf03da..daef7accdbcb 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_R2_CLOCKTICKS",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
"PerPkg": "1",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
"PerPkg": "1",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
"PerPkg": "1",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -77,6 +86,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -86,6 +96,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -95,6 +106,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.ALL",
"PerPkg": "1",
@@ -104,6 +116,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -113,6 +126,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -122,6 +136,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -131,6 +146,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW",
"PerPkg": "1",
@@ -140,6 +156,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -149,6 +166,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -158,6 +176,7 @@
},
{
"BriefDescription": "AK Ingress Bounced; Dn",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_R2_RING_AK_BOUNCES.DN",
"PerPkg": "1",
@@ -167,6 +186,7 @@
},
{
"BriefDescription": "AK Ingress Bounced; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_R2_RING_AK_BOUNCES.UP",
"PerPkg": "1",
@@ -176,6 +196,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -185,6 +206,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -194,6 +216,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -203,6 +226,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -212,6 +236,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW",
"PerPkg": "1",
@@ -221,6 +246,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -230,6 +256,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -239,6 +266,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -248,6 +276,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -257,6 +286,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -266,6 +296,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -275,6 +306,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW",
"PerPkg": "1",
@@ -284,6 +316,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -293,6 +326,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -302,6 +336,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -311,6 +346,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.CCW",
"PerPkg": "1",
@@ -320,6 +356,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.CW",
"PerPkg": "1",
@@ -329,6 +366,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
"PerPkg": "1",
@@ -338,6 +376,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
"PerPkg": "1",
@@ -347,6 +386,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R2_RxR_INSERTS.NCB",
"PerPkg": "1",
@@ -356,6 +396,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R2_RxR_INSERTS.NCS",
"PerPkg": "1",
@@ -365,6 +406,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
"PerPkg": "1",
@@ -374,6 +416,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -383,6 +426,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -392,6 +436,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -401,6 +446,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -410,6 +456,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
@@ -419,6 +466,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
@@ -428,6 +476,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
@@ -437,6 +486,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
@@ -446,6 +496,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AD",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
"PerPkg": "1",
@@ -455,6 +506,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AK",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
"PerPkg": "1",
@@ -464,6 +516,7 @@
},
{
"BriefDescription": "Egress Cycles Full; BL",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
"PerPkg": "1",
@@ -473,6 +526,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AD",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AD",
"PerPkg": "1",
@@ -482,6 +536,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AK",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AK",
"PerPkg": "1",
@@ -491,6 +546,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; BL",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.BL",
"PerPkg": "1",
@@ -500,6 +556,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
"PerPkg": "1",
@@ -509,6 +566,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
"PerPkg": "1",
@@ -518,6 +576,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
"PerPkg": "1",
@@ -527,6 +586,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
"PerPkg": "1",
@@ -536,6 +596,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
"PerPkg": "1",
@@ -545,6 +606,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
index a764234a3584..ddc83d3885ae 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.BYP",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
"PerPkg": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.WR",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.ACT",
"PerPkg": "1",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.CAS",
"PerPkg": "1",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.PRE",
"PerPkg": "1",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -61,6 +68,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -70,6 +78,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
@@ -79,6 +88,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_RMM",
"PerPkg": "1",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
@@ -96,6 +107,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_WMM",
"PerPkg": "1",
@@ -104,6 +116,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -113,6 +126,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
"PerPkg": "1",
@@ -122,6 +136,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
@@ -131,12 +146,14 @@
},
{
"BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_DCLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
"PerPkg": "1",
@@ -145,6 +162,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
@@ -154,6 +172,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
@@ -163,6 +182,7 @@
},
{
"BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
@@ -171,6 +191,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
"PerPkg": "1",
@@ -180,6 +201,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
"PerPkg": "1",
@@ -189,6 +211,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
@@ -198,6 +221,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
"PerPkg": "1",
@@ -207,6 +231,7 @@
},
{
"BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
"PerPkg": "1",
@@ -215,6 +240,7 @@
},
{
"BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"PerPkg": "1",
@@ -223,6 +249,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
"PerPkg": "1",
@@ -232,6 +259,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
"PerPkg": "1",
@@ -241,6 +269,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
"PerPkg": "1",
@@ -250,6 +279,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
"PerPkg": "1",
@@ -259,6 +289,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
"PerPkg": "1",
@@ -268,6 +299,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
"PerPkg": "1",
@@ -277,6 +309,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
"PerPkg": "1",
@@ -286,6 +319,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
"PerPkg": "1",
@@ -295,6 +329,7 @@
},
{
"BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
"PerPkg": "1",
@@ -303,6 +338,7 @@
},
{
"BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M_POWER_PCU_THROTTLING",
"PerPkg": "1",
@@ -310,6 +346,7 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"PerPkg": "1",
@@ -318,6 +355,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
"PerPkg": "1",
@@ -327,6 +365,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
"PerPkg": "1",
@@ -336,6 +375,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
"PerPkg": "1",
@@ -345,6 +385,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
"PerPkg": "1",
@@ -354,6 +395,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
"PerPkg": "1",
@@ -363,6 +405,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
"PerPkg": "1",
@@ -372,6 +415,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
"PerPkg": "1",
@@ -381,6 +425,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
"PerPkg": "1",
@@ -390,6 +435,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
"PerPkg": "1",
@@ -399,6 +445,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
"PerPkg": "1",
@@ -408,6 +455,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.BYP",
"PerPkg": "1",
@@ -417,6 +465,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
"PerPkg": "1",
@@ -426,6 +475,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
@@ -435,6 +485,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -444,6 +495,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
@@ -453,6 +505,7 @@
},
{
"BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.HIGH",
"PerPkg": "1",
@@ -461,6 +514,7 @@
},
{
"BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.LOW",
"PerPkg": "1",
@@ -469,6 +523,7 @@
},
{
"BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.MED",
"PerPkg": "1",
@@ -477,6 +532,7 @@
},
{
"BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.PANIC",
"PerPkg": "1",
@@ -485,6 +541,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
"PerPkg": "1",
@@ -494,6 +551,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK0",
"PerPkg": "1",
@@ -502,6 +560,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK1",
"PerPkg": "1",
@@ -511,6 +570,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK10",
"PerPkg": "1",
@@ -520,6 +580,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK11",
"PerPkg": "1",
@@ -529,6 +590,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK12",
"PerPkg": "1",
@@ -538,6 +600,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK13",
"PerPkg": "1",
@@ -547,6 +610,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK14",
"PerPkg": "1",
@@ -556,6 +620,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK15",
"PerPkg": "1",
@@ -565,6 +630,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK2",
"PerPkg": "1",
@@ -574,6 +640,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK3",
"PerPkg": "1",
@@ -583,6 +650,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK4",
"PerPkg": "1",
@@ -592,6 +660,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK5",
"PerPkg": "1",
@@ -601,6 +670,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK6",
"PerPkg": "1",
@@ -610,6 +680,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK7",
"PerPkg": "1",
@@ -619,6 +690,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK8",
"PerPkg": "1",
@@ -628,6 +700,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK9",
"PerPkg": "1",
@@ -637,6 +710,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
"PerPkg": "1",
@@ -646,6 +720,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
"PerPkg": "1",
@@ -655,6 +730,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
"PerPkg": "1",
@@ -664,6 +740,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
"PerPkg": "1",
@@ -673,6 +750,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
"PerPkg": "1",
@@ -682,6 +760,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK0",
"PerPkg": "1",
@@ -690,6 +769,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK1",
"PerPkg": "1",
@@ -699,6 +779,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK10",
"PerPkg": "1",
@@ -708,6 +789,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK11",
"PerPkg": "1",
@@ -717,6 +799,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK12",
"PerPkg": "1",
@@ -726,6 +809,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK13",
"PerPkg": "1",
@@ -735,6 +819,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK14",
"PerPkg": "1",
@@ -744,6 +829,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK15",
"PerPkg": "1",
@@ -753,6 +839,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK2",
"PerPkg": "1",
@@ -762,6 +849,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK3",
"PerPkg": "1",
@@ -771,6 +859,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK4",
"PerPkg": "1",
@@ -780,6 +869,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK5",
"PerPkg": "1",
@@ -789,6 +879,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK6",
"PerPkg": "1",
@@ -798,6 +889,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK7",
"PerPkg": "1",
@@ -807,6 +899,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK8",
"PerPkg": "1",
@@ -816,6 +909,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK9",
"PerPkg": "1",
@@ -825,6 +919,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
"PerPkg": "1",
@@ -834,6 +929,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
"PerPkg": "1",
@@ -843,6 +939,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
"PerPkg": "1",
@@ -852,6 +949,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
"PerPkg": "1",
@@ -861,6 +959,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK0",
"PerPkg": "1",
@@ -869,6 +968,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
"PerPkg": "1",
@@ -878,6 +978,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK0",
"PerPkg": "1",
@@ -886,6 +987,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK1",
"PerPkg": "1",
@@ -895,6 +997,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK10",
"PerPkg": "1",
@@ -904,6 +1007,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK11",
"PerPkg": "1",
@@ -913,6 +1017,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK12",
"PerPkg": "1",
@@ -922,6 +1027,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK13",
"PerPkg": "1",
@@ -931,6 +1037,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK14",
"PerPkg": "1",
@@ -940,6 +1047,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK15",
"PerPkg": "1",
@@ -949,6 +1057,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK2",
"PerPkg": "1",
@@ -958,6 +1067,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK3",
"PerPkg": "1",
@@ -967,6 +1077,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK4",
"PerPkg": "1",
@@ -976,6 +1087,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK5",
"PerPkg": "1",
@@ -985,6 +1097,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK6",
"PerPkg": "1",
@@ -994,6 +1107,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK7",
"PerPkg": "1",
@@ -1003,6 +1117,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK8",
"PerPkg": "1",
@@ -1012,6 +1127,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK9",
"PerPkg": "1",
@@ -1021,6 +1137,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
"PerPkg": "1",
@@ -1030,6 +1147,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
"PerPkg": "1",
@@ -1039,6 +1157,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
"PerPkg": "1",
@@ -1048,6 +1167,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
"PerPkg": "1",
@@ -1057,6 +1177,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
"PerPkg": "1",
@@ -1066,6 +1187,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK0",
"PerPkg": "1",
@@ -1074,6 +1196,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK1",
"PerPkg": "1",
@@ -1083,6 +1206,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK10",
"PerPkg": "1",
@@ -1092,6 +1216,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK11",
"PerPkg": "1",
@@ -1101,6 +1226,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK12",
"PerPkg": "1",
@@ -1110,6 +1236,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK13",
"PerPkg": "1",
@@ -1119,6 +1246,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK14",
"PerPkg": "1",
@@ -1128,6 +1256,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK15",
"PerPkg": "1",
@@ -1137,6 +1266,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK2",
"PerPkg": "1",
@@ -1146,6 +1276,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK3",
"PerPkg": "1",
@@ -1155,6 +1286,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK4",
"PerPkg": "1",
@@ -1164,6 +1296,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK5",
"PerPkg": "1",
@@ -1173,6 +1306,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK6",
"PerPkg": "1",
@@ -1182,6 +1316,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK7",
"PerPkg": "1",
@@ -1191,6 +1326,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK8",
"PerPkg": "1",
@@ -1200,6 +1336,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK9",
"PerPkg": "1",
@@ -1209,6 +1346,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
"PerPkg": "1",
@@ -1218,6 +1356,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
"PerPkg": "1",
@@ -1227,6 +1366,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
"PerPkg": "1",
@@ -1236,6 +1376,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
"PerPkg": "1",
@@ -1245,6 +1386,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
"PerPkg": "1",
@@ -1254,6 +1396,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK0",
"PerPkg": "1",
@@ -1262,6 +1405,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK1",
"PerPkg": "1",
@@ -1271,6 +1415,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK10",
"PerPkg": "1",
@@ -1280,6 +1425,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK11",
"PerPkg": "1",
@@ -1289,6 +1435,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK12",
"PerPkg": "1",
@@ -1298,6 +1445,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK13",
"PerPkg": "1",
@@ -1307,6 +1455,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK14",
"PerPkg": "1",
@@ -1316,6 +1465,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK15",
"PerPkg": "1",
@@ -1325,6 +1475,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK2",
"PerPkg": "1",
@@ -1334,6 +1485,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK3",
"PerPkg": "1",
@@ -1343,6 +1495,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK4",
"PerPkg": "1",
@@ -1352,6 +1505,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK5",
"PerPkg": "1",
@@ -1361,6 +1515,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK6",
"PerPkg": "1",
@@ -1370,6 +1525,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK7",
"PerPkg": "1",
@@ -1379,6 +1535,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK8",
"PerPkg": "1",
@@ -1388,6 +1545,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK9",
"PerPkg": "1",
@@ -1397,6 +1555,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
"PerPkg": "1",
@@ -1406,6 +1565,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
"PerPkg": "1",
@@ -1415,6 +1575,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
"PerPkg": "1",
@@ -1424,6 +1585,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
"PerPkg": "1",
@@ -1433,6 +1595,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
"PerPkg": "1",
@@ -1442,6 +1605,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK0",
"PerPkg": "1",
@@ -1450,6 +1614,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK1",
"PerPkg": "1",
@@ -1459,6 +1624,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK10",
"PerPkg": "1",
@@ -1468,6 +1634,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK11",
"PerPkg": "1",
@@ -1477,6 +1644,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK12",
"PerPkg": "1",
@@ -1486,6 +1654,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK13",
"PerPkg": "1",
@@ -1495,6 +1664,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK14",
"PerPkg": "1",
@@ -1504,6 +1674,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK15",
"PerPkg": "1",
@@ -1513,6 +1684,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK2",
"PerPkg": "1",
@@ -1522,6 +1694,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK3",
"PerPkg": "1",
@@ -1531,6 +1704,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK4",
"PerPkg": "1",
@@ -1540,6 +1714,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK5",
"PerPkg": "1",
@@ -1549,6 +1724,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK6",
"PerPkg": "1",
@@ -1558,6 +1734,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK7",
"PerPkg": "1",
@@ -1567,6 +1744,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK8",
"PerPkg": "1",
@@ -1576,6 +1754,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK9",
"PerPkg": "1",
@@ -1585,6 +1764,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
"PerPkg": "1",
@@ -1594,6 +1774,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
"PerPkg": "1",
@@ -1603,6 +1784,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
"PerPkg": "1",
@@ -1612,6 +1794,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
"PerPkg": "1",
@@ -1621,6 +1804,7 @@
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
"PerPkg": "1",
@@ -1629,6 +1813,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -1637,6 +1822,7 @@
},
{
"BriefDescription": "VMSE MXB write buffer occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
"PerPkg": "1",
@@ -1644,6 +1830,7 @@
},
{
"BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M_VMSE_WR_PUSH.RMM",
"PerPkg": "1",
@@ -1652,6 +1839,7 @@
},
{
"BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M_VMSE_WR_PUSH.WMM",
"PerPkg": "1",
@@ -1660,6 +1848,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
"PerPkg": "1",
@@ -1668,6 +1857,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.STARVE",
"PerPkg": "1",
@@ -1676,6 +1866,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
"PerPkg": "1",
@@ -1684,6 +1875,7 @@
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
"PerPkg": "1",
@@ -1692,6 +1884,7 @@
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
"PerPkg": "1",
@@ -1700,6 +1893,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
"PerPkg": "1",
@@ -1708,6 +1902,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
"PerPkg": "1",
@@ -1716,6 +1911,7 @@
},
{
"BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_M_WRONG_MM",
"PerPkg": "1",
@@ -1723,6 +1919,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
"PerPkg": "1",
@@ -1732,6 +1929,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK0",
"PerPkg": "1",
@@ -1740,6 +1938,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK1",
"PerPkg": "1",
@@ -1749,6 +1948,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK10",
"PerPkg": "1",
@@ -1758,6 +1958,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK11",
"PerPkg": "1",
@@ -1767,6 +1968,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK12",
"PerPkg": "1",
@@ -1776,6 +1978,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK13",
"PerPkg": "1",
@@ -1785,6 +1988,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK14",
"PerPkg": "1",
@@ -1794,6 +1998,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK15",
"PerPkg": "1",
@@ -1803,6 +2008,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK2",
"PerPkg": "1",
@@ -1812,6 +2018,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK3",
"PerPkg": "1",
@@ -1821,6 +2028,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK4",
"PerPkg": "1",
@@ -1830,6 +2038,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK5",
"PerPkg": "1",
@@ -1839,6 +2048,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK6",
"PerPkg": "1",
@@ -1848,6 +2058,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK7",
"PerPkg": "1",
@@ -1857,6 +2068,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK8",
"PerPkg": "1",
@@ -1866,6 +2078,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK9",
"PerPkg": "1",
@@ -1875,6 +2088,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
"PerPkg": "1",
@@ -1884,6 +2098,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
"PerPkg": "1",
@@ -1893,6 +2108,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
"PerPkg": "1",
@@ -1902,6 +2118,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
"PerPkg": "1",
@@ -1911,6 +2128,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
"PerPkg": "1",
@@ -1920,6 +2138,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK0",
"PerPkg": "1",
@@ -1928,6 +2147,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK1",
"PerPkg": "1",
@@ -1937,6 +2157,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK10",
"PerPkg": "1",
@@ -1946,6 +2167,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK11",
"PerPkg": "1",
@@ -1955,6 +2177,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK12",
"PerPkg": "1",
@@ -1964,6 +2187,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK13",
"PerPkg": "1",
@@ -1973,6 +2197,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK14",
"PerPkg": "1",
@@ -1982,6 +2207,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK15",
"PerPkg": "1",
@@ -1991,6 +2217,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK2",
"PerPkg": "1",
@@ -2000,6 +2227,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK3",
"PerPkg": "1",
@@ -2009,6 +2237,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK4",
"PerPkg": "1",
@@ -2018,6 +2247,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK5",
"PerPkg": "1",
@@ -2027,6 +2257,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK6",
"PerPkg": "1",
@@ -2036,6 +2267,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK7",
"PerPkg": "1",
@@ -2045,6 +2277,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK8",
"PerPkg": "1",
@@ -2054,6 +2287,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK9",
"PerPkg": "1",
@@ -2063,6 +2297,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
"PerPkg": "1",
@@ -2072,6 +2307,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
"PerPkg": "1",
@@ -2081,6 +2317,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
"PerPkg": "1",
@@ -2090,6 +2327,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
"PerPkg": "1",
@@ -2099,6 +2337,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
"PerPkg": "1",
@@ -2108,6 +2347,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK0",
"PerPkg": "1",
@@ -2116,6 +2356,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK1",
"PerPkg": "1",
@@ -2125,6 +2366,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK10",
"PerPkg": "1",
@@ -2134,6 +2376,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK11",
"PerPkg": "1",
@@ -2143,6 +2386,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK12",
"PerPkg": "1",
@@ -2152,6 +2396,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK13",
"PerPkg": "1",
@@ -2161,6 +2406,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK14",
"PerPkg": "1",
@@ -2170,6 +2416,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK15",
"PerPkg": "1",
@@ -2179,6 +2426,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK2",
"PerPkg": "1",
@@ -2188,6 +2436,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK3",
"PerPkg": "1",
@@ -2197,6 +2446,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK4",
"PerPkg": "1",
@@ -2206,6 +2456,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK5",
"PerPkg": "1",
@@ -2215,6 +2466,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK6",
"PerPkg": "1",
@@ -2224,6 +2476,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK7",
"PerPkg": "1",
@@ -2233,6 +2486,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK8",
"PerPkg": "1",
@@ -2242,6 +2496,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK9",
"PerPkg": "1",
@@ -2251,6 +2506,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
"PerPkg": "1",
@@ -2260,6 +2516,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
"PerPkg": "1",
@@ -2269,6 +2526,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
"PerPkg": "1",
@@ -2278,6 +2536,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
"PerPkg": "1",
@@ -2287,6 +2546,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
"PerPkg": "1",
@@ -2296,6 +2556,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK0",
"PerPkg": "1",
@@ -2304,6 +2565,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK1",
"PerPkg": "1",
@@ -2313,6 +2575,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK10",
"PerPkg": "1",
@@ -2322,6 +2585,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK11",
"PerPkg": "1",
@@ -2331,6 +2595,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK12",
"PerPkg": "1",
@@ -2340,6 +2605,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK13",
"PerPkg": "1",
@@ -2349,6 +2615,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK14",
"PerPkg": "1",
@@ -2358,6 +2625,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK15",
"PerPkg": "1",
@@ -2367,6 +2635,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK2",
"PerPkg": "1",
@@ -2376,6 +2645,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK3",
"PerPkg": "1",
@@ -2385,6 +2655,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK4",
"PerPkg": "1",
@@ -2394,6 +2665,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK5",
"PerPkg": "1",
@@ -2403,6 +2675,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK6",
"PerPkg": "1",
@@ -2412,6 +2685,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK7",
"PerPkg": "1",
@@ -2421,6 +2695,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK8",
"PerPkg": "1",
@@ -2430,6 +2705,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK9",
"PerPkg": "1",
@@ -2439,6 +2715,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
"PerPkg": "1",
@@ -2448,6 +2725,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
"PerPkg": "1",
@@ -2457,6 +2735,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
"PerPkg": "1",
@@ -2466,6 +2745,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
"PerPkg": "1",
@@ -2475,6 +2755,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
"PerPkg": "1",
@@ -2484,6 +2765,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK0",
"PerPkg": "1",
@@ -2492,6 +2774,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK1",
"PerPkg": "1",
@@ -2501,6 +2784,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK10",
"PerPkg": "1",
@@ -2510,6 +2794,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK11",
"PerPkg": "1",
@@ -2519,6 +2804,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK12",
"PerPkg": "1",
@@ -2528,6 +2814,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK13",
"PerPkg": "1",
@@ -2537,6 +2824,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK14",
"PerPkg": "1",
@@ -2546,6 +2834,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK15",
"PerPkg": "1",
@@ -2555,6 +2844,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK2",
"PerPkg": "1",
@@ -2564,6 +2854,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK3",
"PerPkg": "1",
@@ -2573,6 +2864,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK4",
"PerPkg": "1",
@@ -2582,6 +2874,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK5",
"PerPkg": "1",
@@ -2591,6 +2884,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK6",
"PerPkg": "1",
@@ -2600,6 +2894,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK7",
"PerPkg": "1",
@@ -2609,6 +2904,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK8",
"PerPkg": "1",
@@ -2618,6 +2914,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK9",
"PerPkg": "1",
@@ -2627,6 +2924,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
"PerPkg": "1",
@@ -2636,6 +2934,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
"PerPkg": "1",
@@ -2645,6 +2944,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
"PerPkg": "1",
@@ -2654,6 +2954,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
"PerPkg": "1",
@@ -2663,6 +2964,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
"PerPkg": "1",
@@ -2672,6 +2974,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK0",
"PerPkg": "1",
@@ -2680,6 +2983,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK1",
"PerPkg": "1",
@@ -2689,6 +2993,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK10",
"PerPkg": "1",
@@ -2698,6 +3003,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK11",
"PerPkg": "1",
@@ -2707,6 +3013,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK12",
"PerPkg": "1",
@@ -2716,6 +3023,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK13",
"PerPkg": "1",
@@ -2725,6 +3033,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK14",
"PerPkg": "1",
@@ -2734,6 +3043,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK15",
"PerPkg": "1",
@@ -2743,6 +3053,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK2",
"PerPkg": "1",
@@ -2752,6 +3063,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK3",
"PerPkg": "1",
@@ -2761,6 +3073,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK4",
"PerPkg": "1",
@@ -2770,6 +3083,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK5",
"PerPkg": "1",
@@ -2779,6 +3093,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK6",
"PerPkg": "1",
@@ -2788,6 +3103,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK7",
"PerPkg": "1",
@@ -2797,6 +3113,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK8",
"PerPkg": "1",
@@ -2806,6 +3123,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK9",
"PerPkg": "1",
@@ -2815,6 +3133,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
"PerPkg": "1",
@@ -2824,6 +3143,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
"PerPkg": "1",
@@ -2833,6 +3153,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
"PerPkg": "1",
@@ -2842,6 +3163,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
index 320aaab53a0b..afdc636b9855 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -88,6 +99,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -144,6 +162,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -152,6 +171,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
@@ -160,6 +180,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
@@ -168,6 +189,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_P_DEMOTIONS_CORE10",
"PerPkg": "1",
@@ -176,6 +198,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3B",
"EventName": "UNC_P_DEMOTIONS_CORE11",
"PerPkg": "1",
@@ -184,6 +207,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_P_DEMOTIONS_CORE12",
"PerPkg": "1",
@@ -192,6 +216,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_P_DEMOTIONS_CORE13",
"PerPkg": "1",
@@ -200,6 +225,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_P_DEMOTIONS_CORE14",
"PerPkg": "1",
@@ -208,6 +234,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_P_DEMOTIONS_CORE15",
"PerPkg": "1",
@@ -216,6 +243,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_P_DEMOTIONS_CORE16",
"PerPkg": "1",
@@ -224,6 +252,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_P_DEMOTIONS_CORE17",
"PerPkg": "1",
@@ -232,6 +261,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
@@ -240,6 +270,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_P_DEMOTIONS_CORE3",
"PerPkg": "1",
@@ -248,6 +279,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_P_DEMOTIONS_CORE4",
"PerPkg": "1",
@@ -256,6 +288,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_P_DEMOTIONS_CORE5",
"PerPkg": "1",
@@ -264,6 +297,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_P_DEMOTIONS_CORE6",
"PerPkg": "1",
@@ -272,6 +306,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_P_DEMOTIONS_CORE7",
"PerPkg": "1",
@@ -280,6 +315,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_P_DEMOTIONS_CORE8",
"PerPkg": "1",
@@ -288,6 +324,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_P_DEMOTIONS_CORE9",
"PerPkg": "1",
@@ -296,6 +333,7 @@
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
"PerPkg": "1",
@@ -304,6 +342,7 @@
},
{
"BriefDescription": "OS Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
"PerPkg": "1",
@@ -312,6 +351,7 @@
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
"PerPkg": "1",
@@ -320,6 +360,7 @@
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
"PerPkg": "1",
@@ -328,6 +369,7 @@
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
"PerPkg": "1",
@@ -336,6 +378,7 @@
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
@@ -344,6 +387,7 @@
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
"PerPkg": "1",
@@ -352,6 +396,7 @@
},
{
"BriefDescription": "Package C State Residency - C1E",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
"PerPkg": "1",
@@ -360,6 +405,7 @@
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
"PerPkg": "1",
@@ -368,6 +414,7 @@
},
{
"BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
"PerPkg": "1",
@@ -376,6 +423,7 @@
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
"PerPkg": "1",
@@ -384,6 +432,7 @@
},
{
"BriefDescription": "Package C7 State Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
"PerPkg": "1",
@@ -392,6 +441,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"Filter": "occ_sel=1",
@@ -401,6 +451,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"Filter": "occ_sel=2",
@@ -410,6 +461,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"Filter": "occ_sel=3",
@@ -419,6 +471,7 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"PerPkg": "1",
@@ -427,6 +480,7 @@
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
"PerPkg": "1",
@@ -435,6 +489,7 @@
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -443,6 +498,7 @@
},
{
"BriefDescription": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
"PerPkg": "1",
@@ -451,6 +507,7 @@
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
index 93621e004d88..eb1d9541e26c 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
"SampleAfterValue": "2000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
"SampleAfterValue": "2000003",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
@@ -149,6 +167,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
@@ -157,6 +176,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
@@ -165,6 +185,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
@@ -174,6 +195,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -181,6 +203,7 @@
},
{
"BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
@@ -188,6 +211,7 @@
},
{
"BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
@@ -195,6 +219,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
@@ -203,6 +228,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
@@ -212,6 +238,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
@@ -221,6 +248,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
@@ -230,6 +258,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
@@ -239,6 +268,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L1",
@@ -247,6 +277,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L2",
@@ -255,6 +286,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L3",
@@ -263,6 +295,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
@@ -271,6 +304,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L1",
@@ -279,6 +313,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L2",
@@ -287,6 +322,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L3",
@@ -295,6 +331,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -303,6 +340,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
index 0aed533da882..0577d7460082 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -68,7 +68,7 @@
},
{
"BriefDescription": "Percentage of time spent in the active CPU power state C0",
- "MetricExpr": "tma_info_system_cpu_utilization",
+ "MetricExpr": "tma_info_system_cpus_utilized",
"MetricName": "cpu_utilization",
"ScaleUnit": "100%"
},
@@ -292,7 +292,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -302,7 +302,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -323,7 +323,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -362,7 +362,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -383,7 +383,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -392,7 +392,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
@@ -429,7 +429,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -438,7 +438,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
@@ -447,7 +447,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -457,7 +457,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -494,7 +494,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -503,7 +503,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -531,7 +531,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -551,7 +551,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
@@ -590,7 +590,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -630,7 +630,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -704,12 +704,12 @@
"MetricThreshold": "tma_info_inst_mix_ipstore < 8"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -730,42 +730,24 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "Average Parallel L2 cache miss data reads",
- "MetricExpr": "tma_info_memory_latency_data_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_data_l2_mlp"
- },
- {
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_l1mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l2_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -796,16 +778,16 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l3_cache_fill_bw"
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l3_cache_fill_bw_2t"
+ "MetricName": "tma_info_memory_l3_cache_fill_bw"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
@@ -821,27 +803,15 @@
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_miss_latency",
- "MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
- },
- {
- "BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_latency_load_l2_mlp"
- },
- {
- "BriefDescription": "Average Latency for L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_load_l2_miss_latency"
+ "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_load_l2_mlp"
+ "MetricName": "tma_info_memory_latency_load_l2_mlp"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
@@ -860,19 +830,13 @@
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "tma_info_memory_tlb_page_walks_utilization",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_page_walks_utilization"
- },
- {
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "(ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * (DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED)) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_page_walks_utilization",
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
@@ -891,13 +855,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1012,7 +976,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1021,7 +985,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -1039,7 +1003,7 @@
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
@@ -1059,7 +1023,7 @@
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
@@ -1117,7 +1081,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -1127,7 +1091,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1136,7 +1100,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
@@ -1165,7 +1129,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
@@ -1301,7 +1265,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -1328,7 +1292,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -1357,7 +1321,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1385,7 +1349,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -1402,7 +1366,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index 781e7c64e71f..beeda41b428a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L1D miss outstandings duration in cycles",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -35,6 +39,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_DEMAND_RQSTS.WB_HIT",
"PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100003",
@@ -90,6 +101,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "This event counts the total number of L2 code requests.",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"SampleAfterValue": "200003",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
@@ -143,6 +161,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
@@ -150,6 +169,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
@@ -158,6 +178,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -166,6 +187,7 @@
},
{
"BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_HIT",
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
@@ -174,6 +196,7 @@
},
{
"BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_MISS",
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
@@ -182,6 +205,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200003",
@@ -189,6 +213,7 @@
},
{
"BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200003",
@@ -196,6 +221,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
@@ -203,6 +229,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
@@ -210,6 +237,7 @@
},
{
"BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_PF",
"PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
@@ -218,6 +246,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
@@ -226,6 +255,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.CODE_RD",
"PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
@@ -234,6 +264,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
@@ -242,6 +273,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L1D_WB",
"PublicDescription": "This event counts L1D writebacks that access L2 cache.",
@@ -250,6 +282,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_FILL",
"PublicDescription": "This event counts L2 fill requests that access L2 cache.",
@@ -258,6 +291,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "This event counts L2 writebacks that access L2 cache.",
@@ -266,6 +300,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.RFO",
"PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
@@ -274,6 +309,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
@@ -282,6 +318,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
@@ -290,6 +327,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
@@ -298,6 +336,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -309,6 +348,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -320,6 +360,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -331,6 +372,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD2",
@@ -342,6 +384,7 @@
},
{
"BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70, BDM100",
"EventCode": "0xD3",
@@ -353,6 +396,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70",
"EventCode": "0xD3",
@@ -363,6 +407,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70",
"EventCode": "0xD3",
@@ -373,6 +418,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDE70",
"EventCode": "0xD3",
@@ -383,6 +429,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
@@ -393,6 +440,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
@@ -403,6 +451,7 @@
},
{
"BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
@@ -413,6 +462,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM35",
"EventCode": "0xD1",
@@ -424,6 +474,7 @@
},
{
"BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
@@ -434,6 +485,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100",
"EventCode": "0xD1",
@@ -445,6 +497,7 @@
},
{
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM100, BDE70",
"EventCode": "0xD1",
@@ -455,6 +508,7 @@
},
{
"BriefDescription": "Retired load uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -465,6 +519,7 @@
},
{
"BriefDescription": "Retired store uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -475,6 +530,7 @@
},
{
"BriefDescription": "Retired load uops with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "BDM35",
"EventCode": "0xD0",
@@ -486,6 +542,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -496,6 +553,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
@@ -506,6 +564,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
@@ -516,6 +575,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
@@ -526,6 +586,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -534,6 +595,7 @@
},
{
"BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
@@ -542,6 +604,7 @@
},
{
"BriefDescription": "Cacheable and non-cacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "This event counts both cacheable and non-cacheable code read requests.",
@@ -550,6 +613,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -558,6 +622,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -566,6 +631,7 @@
},
{
"BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
@@ -574,6 +640,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
@@ -583,6 +650,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -593,6 +661,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -603,6 +672,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -613,6 +683,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
@@ -622,6 +693,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
@@ -631,6 +703,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"Errata": "BDM76",
"EventCode": "0x60",
@@ -640,6 +713,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "BDM76",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
@@ -649,6 +723,7 @@
},
{
"BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
@@ -656,6 +731,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -665,6 +741,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -674,6 +751,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -683,6 +761,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -692,6 +771,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -701,6 +781,7 @@
},
{
"BriefDescription": "Counts all requests hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -710,6 +791,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -719,6 +801,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -728,6 +811,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -737,6 +821,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -746,6 +831,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -755,6 +841,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -764,6 +851,7 @@
},
{
"BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"PublicDescription": "This event counts the number of split locks in the super queue.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/counter.json b/tools/perf/pmu-events/arch/x86/broadwellx/counter.json
new file mode 100644
index 000000000000..9fde9c0a896d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/counter.json
@@ -0,0 +1,57 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "HA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "QPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "R2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "R3QPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "3"
+ },
+ {
+ "Unit": "SBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
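The new counter.json above is the commit's per-unit counter inventory for broadwellx: the core PMU advertises 3 fixed and 4 generic counters (hence the "0,1,2,3" values in the event files), and each uncore unit (CBOX, HA, iMC, and so on) lists its own counts. As a rough illustration of how the two pieces fit together, a local consistency check could be sketched as follows; the paths and the checking logic are assumptions for illustration, not the perf tool's own event-scheduling code:

```python
#!/usr/bin/env python3
# Minimal sketch (assumptions, not perf's scheduler): verify that every core
# event's "Counter" list stays within the generic-counter range that
# counter.json declares for the "core" unit.
import json
from pathlib import Path

ARCH_DIR = Path("tools/perf/pmu-events/arch/x86/broadwellx")

def core_generic_counters(counter_file: Path) -> int:
    """Read CountersNumGeneric for the core unit from counter.json."""
    for unit in json.loads(counter_file.read_text()):
        if unit["Unit"] == "core":
            return int(unit["CountersNumGeneric"])
    raise ValueError("counter.json has no 'core' unit")

def check_events(event_file: Path, num_generic: int) -> None:
    for ev in json.loads(event_file.read_text()):
        for c in ev.get("Counter", "").split(","):
            c = c.strip()
            if c.isdigit() and int(c) >= num_generic:
                print(f"{event_file.name}: {ev.get('EventName', '?')} "
                      f"uses out-of-range counter {c}")

if __name__ == "__main__":
    n = core_generic_counters(ARCH_DIR / "counter.json")
    for f in sorted(ARCH_DIR.glob("*.json")):
        if f.name != "counter.json":
            check_events(f, n)
```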
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
index 986869252e71..9bf595af3f42 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
"SampleAfterValue": "2000006",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* packed double and single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.PACKED",
"SampleAfterValue": "2000004",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation operation. Applies to SSE* and AVX* scalar double and single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -63,6 +71,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -71,6 +80,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -79,6 +89,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
@@ -86,6 +97,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "2000003",
@@ -93,6 +105,7 @@
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -102,6 +115,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
@@ -110,6 +124,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
@@ -118,6 +133,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
@@ -126,6 +142,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
@@ -134,6 +151,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -141,6 +159,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -148,6 +167,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "BDM30",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
@@ -157,6 +177,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "BDM30",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
@@ -166,6 +187,7 @@
},
{
"BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
"PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
index bd5da39564e1..db3488abf9fc 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFDATA_STALL",
"PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -85,6 +95,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
"PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
@@ -101,6 +113,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -109,6 +122,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -118,6 +132,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -126,6 +141,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -135,6 +151,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -144,6 +161,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -154,6 +172,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
@@ -162,6 +181,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
@@ -170,6 +190,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -179,6 +200,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
@@ -195,6 +218,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -204,6 +228,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
index a7449e5b68dc..86246f632d79 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of times HLE abort was triggered",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
"PEBS": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
"PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
"PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC4",
"PublicDescription": "Number of times HLE caused a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of times HLE commit succeeded",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.COMMIT",
"PublicDescription": "Number of times HLE commit succeeded.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.START",
"PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 128",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 16",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 256",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -113,6 +125,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 32",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -126,6 +139,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 4",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -139,6 +153,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 512",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -152,6 +167,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 64",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -165,6 +181,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 8",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "BDM100, BDM35",
"EventCode": "0xcd",
@@ -178,6 +195,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
@@ -186,6 +204,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
@@ -194,6 +213,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -203,6 +223,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -212,6 +233,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -221,6 +243,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -230,6 +253,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -239,6 +263,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -248,6 +273,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -257,6 +283,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -266,6 +293,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -275,6 +303,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -284,6 +313,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -293,6 +323,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -302,6 +333,7 @@
},
{
"BriefDescription": "Counts all requests miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -311,6 +343,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -320,6 +353,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -329,6 +363,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -338,6 +373,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -347,6 +383,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -356,6 +393,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -365,6 +403,7 @@
},
{
"BriefDescription": "Number of times RTM abort was triggered",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "1",
@@ -374,6 +413,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
@@ -382,6 +422,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC2",
"PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
@@ -390,6 +431,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC3",
"PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
@@ -398,6 +440,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC4",
"PublicDescription": "Number of times a RTM caused a fault.",
@@ -406,6 +449,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
@@ -414,6 +458,7 @@
},
{
"BriefDescription": "Number of times RTM commit succeeded",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Number of times RTM commit succeeded.",
@@ -422,6 +467,7 @@
},
{
"BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
@@ -430,6 +476,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
@@ -437,6 +484,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -445,6 +493,7 @@
},
{
"BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -453,6 +502,7 @@
},
{
"BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC4",
"PublicDescription": "RTM region detected inside HLE.",
@@ -461,6 +511,7 @@
},
{
"BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC5",
"SampleAfterValue": "2000003",
@@ -468,6 +519,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
@@ -476,6 +528,7 @@
},
{
"BriefDescription": "Number of times a TSX line had a cache conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Number of times a TSX line had a cache conflict.",
@@ -484,6 +537,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
@@ -492,6 +546,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
@@ -500,6 +555,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
@@ -508,6 +564,7 @@
},
{
"BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
@@ -516,6 +573,7 @@
},
{
"BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"PublicDescription": "Number of times we could not allocate Lock Buffer.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json b/tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json
index 8c808347f6da..4193c90c3459 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/other.json b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
index 1c2a5b001949..f0de6a71719b 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 9a902d2160e6..c03f77539362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divider is busy executing divide operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken macro-conditional branch instructions.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"PublicDescription": "This event counts taken speculative and retired direct near calls.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "This event counts all (macro) branch instructions retired.",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "Counter": "0,1,2,3",
"Errata": "BDW98",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -130,6 +146,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "BDW98",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -157,6 +176,7 @@
},
{
"BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
"PEBS": "1",
@@ -166,6 +186,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -175,6 +196,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -184,6 +206,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"PublicDescription": "This event counts not taken branch instructions retired.",
@@ -192,6 +215,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
@@ -200,6 +224,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -208,6 +233,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
@@ -216,6 +242,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -224,6 +251,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -232,6 +260,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
@@ -248,6 +278,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -255,6 +286,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
@@ -263,6 +295,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
@@ -270,6 +303,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -279,6 +313,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -288,6 +323,7 @@
},
{
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -297,6 +333,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -306,6 +343,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -313,6 +351,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
@@ -322,6 +361,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "100003",
@@ -329,6 +369,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -336,6 +377,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -343,6 +385,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
@@ -352,6 +395,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "100003",
@@ -359,6 +403,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -367,12 +412,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -381,12 +428,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -395,6 +444,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -404,6 +454,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -412,6 +463,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
@@ -421,6 +473,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
@@ -430,6 +483,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -438,6 +492,7 @@
},
{
"BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
@@ -447,6 +502,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -455,6 +511,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -464,6 +521,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -472,6 +530,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
@@ -481,6 +540,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
@@ -490,6 +550,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -498,6 +559,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -506,6 +568,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
@@ -514,6 +577,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
@@ -521,6 +585,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"Errata": "BDM61",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
@@ -529,6 +594,7 @@
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"Errata": "BDM11, BDM55",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
@@ -539,6 +605,7 @@
},
{
"BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
@@ -547,6 +614,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RAT_STALL_CYCLES",
"PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
@@ -555,6 +623,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -565,6 +634,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -573,6 +643,7 @@
},
{
"BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"SampleAfterValue": "100003",
@@ -580,6 +651,7 @@
},
{
"BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
@@ -588,6 +660,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
@@ -596,6 +669,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.HW_PF",
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
@@ -604,6 +678,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
@@ -612,6 +687,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -620,6 +696,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -628,6 +705,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
@@ -635,6 +713,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -644,6 +723,7 @@
},
{
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
@@ -652,6 +732,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
@@ -660,6 +741,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -668,6 +750,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -675,6 +758,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -682,6 +766,7 @@
},
{
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
@@ -689,6 +774,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "This event counts resource-related stall cycles.",
@@ -697,6 +783,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -705,6 +792,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -713,6 +801,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
@@ -721,6 +810,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
@@ -729,6 +819,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
@@ -737,6 +828,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -747,6 +839,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
@@ -755,6 +848,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
@@ -763,6 +857,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
@@ -771,6 +866,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
@@ -779,6 +875,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
@@ -787,6 +884,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
@@ -795,6 +893,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
@@ -803,6 +902,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
@@ -811,6 +911,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Number of uops executed from any thread.",
@@ -819,6 +920,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -827,6 +929,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -835,6 +938,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -843,6 +947,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -851,6 +956,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"Invert": "1",
@@ -859,6 +965,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
@@ -867,6 +974,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
@@ -875,6 +983,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
@@ -883,6 +992,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
@@ -891,6 +1001,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -901,6 +1012,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.THREAD",
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
@@ -909,6 +1021,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
@@ -918,6 +1031,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
@@ -925,6 +1039,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
@@ -934,6 +1049,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
@@ -941,6 +1057,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
@@ -950,6 +1067,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -957,6 +1075,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
@@ -966,6 +1085,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
@@ -973,6 +1093,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
@@ -982,6 +1103,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
@@ -989,6 +1111,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
@@ -998,6 +1121,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
@@ -1005,6 +1129,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
@@ -1014,6 +1139,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
"SampleAfterValue": "2000003",
@@ -1021,6 +1147,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7",
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
@@ -1030,6 +1157,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
"SampleAfterValue": "2000003",
@@ -1037,6 +1165,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
@@ -1045,6 +1174,7 @@
},
{
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.FLAGS_MERGE",
"PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
@@ -1053,6 +1183,7 @@
},
{
"BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SINGLE_MUL",
"SampleAfterValue": "2000003",
@@ -1060,6 +1191,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"SampleAfterValue": "2000003",
@@ -1067,6 +1199,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -1077,6 +1210,7 @@
},
{
"BriefDescription": "Actually retired uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -1086,6 +1220,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -1095,6 +1230,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -1105,6 +1241,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
index 400d784d1457..b55b305aecaa 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
"Filter": "filter_opc=0x191",
@@ -12,6 +13,7 @@
},
{
"BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
"Filter": "filter_opc=0x192",
@@ -23,6 +25,7 @@
},
{
"BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.DATA_READ",
"Filter": "filter_opc=0x182",
@@ -34,6 +37,7 @@
},
{
"BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_READ",
"Filter": "filter_opc=0x187,filter_nc=1",
@@ -45,6 +49,7 @@
},
{
"BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_WRITE",
"Filter": "filter_opc=0x18f,filter_nc=1",
@@ -56,6 +61,7 @@
},
{
"BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
"Filter": "filter_opc=0x1c8,filter_tid=0x3e",
@@ -67,6 +73,7 @@
},
{
"BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.PCIE_READ",
"Filter": "filter_opc=0x19e",
@@ -78,6 +85,7 @@
},
{
"BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.PCIE_WRITE",
"Filter": "filter_opc=0x1c8",
@@ -89,6 +97,7 @@
},
{
"BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
"Filter": "filter_opc=0x190",
@@ -100,6 +109,7 @@
},
{
"BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.UNCACHEABLE",
"Filter": "filter_opc=0x187",
@@ -111,6 +121,7 @@
},
{
"BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
"Filter": "filter_opc=0x181",
@@ -122,6 +133,7 @@
},
{
"BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
"Filter": "filter_opc=0x180,filter_tid=0x3e",
@@ -132,6 +144,7 @@
},
{
"BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.PCIE_READ",
"Filter": "filter_opc=0x19e",
@@ -143,6 +156,7 @@
},
{
"BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.PCIE_WRITE",
"Filter": "filter_opc=0x1c8,filter_tid=0x3e",
@@ -154,6 +168,7 @@
},
{
"BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_FULL",
"Filter": "filter_opc=0x18c",
@@ -165,6 +180,7 @@
},
{
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
"Filter": "filter_opc=0x18d",
@@ -176,6 +192,7 @@
},
{
"BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_C_BOUNCE_CONTROL",
"PerPkg": "1",
@@ -183,12 +200,14 @@
},
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBOX"
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_C_COUNTER0_OCCUPANCY",
"PerPkg": "1",
@@ -197,6 +216,7 @@
},
{
"BriefDescription": "FaST wire asserted",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_C_FAST_ASSERTED",
"PerPkg": "1",
@@ -205,6 +225,7 @@
},
{
"BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
"Filter": "filter_state=0x1",
@@ -216,6 +237,7 @@
},
{
"BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
@@ -225,6 +247,7 @@
},
{
"BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.NID",
"PerPkg": "1",
@@ -234,6 +257,7 @@
},
{
"BriefDescription": "Cache Lookups; Any Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.READ",
"PerPkg": "1",
@@ -243,6 +267,7 @@
},
{
"BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
"PerPkg": "1",
@@ -252,6 +277,7 @@
},
{
"BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.WRITE",
"PerPkg": "1",
@@ -261,6 +287,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.E_STATE",
"PerPkg": "1",
@@ -270,6 +297,7 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.F_STATE",
"PerPkg": "1",
@@ -279,6 +307,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.I_STATE",
"PerPkg": "1",
@@ -288,6 +317,7 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.MISS",
"PerPkg": "1",
@@ -297,6 +327,7 @@
},
{
"BriefDescription": "M line evictions from LLC (writebacks to memory)",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
@@ -307,6 +338,7 @@
},
{
"BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.NID",
"PerPkg": "1",
@@ -316,6 +348,7 @@
},
{
"BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
"PerPkg": "1",
@@ -325,6 +358,7 @@
},
{
"BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
"PerPkg": "1",
@@ -334,6 +368,7 @@
},
{
"BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -343,6 +378,7 @@
},
{
"BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RSPI_WAS_FSE",
"PerPkg": "1",
@@ -352,6 +388,7 @@
},
{
"BriefDescription": "Cbo Misc",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.STARTED",
"PerPkg": "1",
@@ -361,6 +398,7 @@
},
{
"BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.WC_ALIASING",
"PerPkg": "1",
@@ -370,6 +408,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE0",
"PerPkg": "1",
@@ -379,6 +418,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE1",
"PerPkg": "1",
@@ -388,6 +428,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE2",
"PerPkg": "1",
@@ -397,6 +438,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE3",
"PerPkg": "1",
@@ -406,6 +448,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.LRU_DECREMENT",
"PerPkg": "1",
@@ -415,6 +458,7 @@
},
{
"BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
"PerPkg": "1",
@@ -424,6 +468,7 @@
},
{
"BriefDescription": "AD Ring In Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.ALL",
"PerPkg": "1",
@@ -433,6 +478,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN",
"PerPkg": "1",
@@ -442,6 +488,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -451,6 +498,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
"PerPkg": "1",
@@ -460,6 +508,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP",
"PerPkg": "1",
@@ -469,6 +518,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP_EVEN",
"PerPkg": "1",
@@ -478,6 +528,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP_ODD",
"PerPkg": "1",
@@ -487,6 +538,7 @@
},
{
"BriefDescription": "AK Ring In Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -496,6 +548,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN",
"PerPkg": "1",
@@ -505,6 +558,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -514,6 +568,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
"PerPkg": "1",
@@ -523,6 +578,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP",
"PerPkg": "1",
@@ -532,6 +588,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP_EVEN",
"PerPkg": "1",
@@ -541,6 +598,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP_ODD",
"PerPkg": "1",
@@ -550,6 +608,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -559,6 +618,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN",
"PerPkg": "1",
@@ -568,6 +628,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -577,6 +638,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
"PerPkg": "1",
@@ -586,6 +648,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP",
"PerPkg": "1",
@@ -595,6 +658,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP_EVEN",
"PerPkg": "1",
@@ -604,6 +668,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP_ODD",
"PerPkg": "1",
@@ -613,6 +678,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AD",
"PerPkg": "1",
@@ -621,6 +687,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AK",
"PerPkg": "1",
@@ -629,6 +696,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.BL",
"PerPkg": "1",
@@ -637,6 +705,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.IV",
"PerPkg": "1",
@@ -645,6 +714,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -654,6 +724,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.DN",
"PerPkg": "1",
@@ -663,6 +734,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.DOWN",
"PerPkg": "1",
@@ -672,6 +744,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.UP",
"PerPkg": "1",
@@ -681,6 +754,7 @@
},
{
"BriefDescription": "AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AD",
"PerPkg": "1",
@@ -689,6 +763,7 @@
},
{
"BriefDescription": "AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AK",
"PerPkg": "1",
@@ -697,6 +772,7 @@
},
{
"BriefDescription": "BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.BL",
"PerPkg": "1",
@@ -705,6 +781,7 @@
},
{
"BriefDescription": "IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.IV",
"PerPkg": "1",
@@ -713,6 +790,7 @@
},
{
"BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_C_RING_SRC_THRTL",
"PerPkg": "1",
@@ -720,6 +798,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
"PerPkg": "1",
@@ -729,6 +808,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
"PerPkg": "1",
@@ -738,6 +818,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
"PerPkg": "1",
@@ -747,6 +828,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
"PerPkg": "1",
@@ -756,6 +838,7 @@
},
{
"BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IPQ",
"PerPkg": "1",
@@ -765,6 +848,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ",
"PerPkg": "1",
@@ -774,6 +858,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
"PerPkg": "1",
@@ -783,6 +868,7 @@
},
{
"BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.PRQ",
"PerPkg": "1",
@@ -792,6 +878,7 @@
},
{
"BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
"PerPkg": "1",
@@ -801,6 +888,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IPQ",
"PerPkg": "1",
@@ -810,6 +898,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IRQ",
"PerPkg": "1",
@@ -819,6 +908,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
"PerPkg": "1",
@@ -828,6 +918,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.PRQ",
"PerPkg": "1",
@@ -837,6 +928,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -846,6 +938,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
"PerPkg": "1",
@@ -855,6 +948,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
"PerPkg": "1",
@@ -864,6 +958,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -873,6 +968,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -882,6 +978,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -891,6 +988,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -900,6 +998,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
"PerPkg": "1",
@@ -909,6 +1008,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
"PerPkg": "1",
@@ -918,6 +1018,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -927,6 +1028,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.NID",
"PerPkg": "1",
@@ -936,6 +1038,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -945,6 +1048,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
"PerPkg": "1",
@@ -954,6 +1058,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -963,6 +1068,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
"PerPkg": "1",
@@ -972,6 +1078,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -981,6 +1088,7 @@
},
{
"BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
"PerPkg": "1",
@@ -990,6 +1098,7 @@
},
{
"BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
"PerPkg": "1",
@@ -999,6 +1108,7 @@
},
{
"BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -1008,6 +1118,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
"PerPkg": "1",
@@ -1017,6 +1128,7 @@
},
{
"BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -1026,6 +1138,7 @@
},
{
"BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
"PerPkg": "1",
@@ -1035,6 +1148,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
"PerPkg": "1",
@@ -1044,6 +1158,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -1053,6 +1168,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
"PerPkg": "1",
@@ -1062,6 +1178,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -1071,6 +1188,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
"PerPkg": "1",
@@ -1080,6 +1198,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -1089,6 +1208,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
"PerPkg": "1",
@@ -1098,6 +1218,7 @@
},
{
"BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
"PerPkg": "1",
@@ -1107,6 +1228,7 @@
},
{
"BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -1116,6 +1238,7 @@
},
{
"BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -1125,6 +1248,7 @@
},
{
"BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x3E",
"EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -1134,6 +1258,7 @@
},
{
"BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x3E",
"EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -1143,6 +1268,7 @@
},
{
"BriefDescription": "TOR Inserts; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
@@ -1152,6 +1278,7 @@
},
{
"BriefDescription": "TOR Inserts; Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
@@ -1161,6 +1288,7 @@
},
{
"BriefDescription": "TOR Inserts; Local Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOCAL",
"PerPkg": "1",
@@ -1170,6 +1298,7 @@
},
{
"BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
"PerPkg": "1",
@@ -1179,6 +1308,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Local Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
"PerPkg": "1",
@@ -1188,6 +1318,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
"PerPkg": "1",
@@ -1197,6 +1328,7 @@
},
{
"BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
@@ -1206,6 +1338,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Remote Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
"PerPkg": "1",
@@ -1215,6 +1348,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
"PerPkg": "1",
@@ -1224,6 +1358,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
@@ -1233,6 +1368,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
@@ -1242,6 +1378,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Miss All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
@@ -1251,6 +1388,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -1260,6 +1398,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
@@ -1269,6 +1408,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
@@ -1278,6 +1418,7 @@
},
{
"BriefDescription": "TOR Inserts; Opcode Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
@@ -1287,6 +1428,7 @@
},
{
"BriefDescription": "TOR Inserts; Remote Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REMOTE",
"PerPkg": "1",
@@ -1296,6 +1438,7 @@
},
{
"BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
"PerPkg": "1",
@@ -1305,6 +1448,7 @@
},
{
"BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WB",
"PerPkg": "1",
@@ -1314,6 +1458,7 @@
},
{
"BriefDescription": "TOR Occupancy; Any",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -1323,6 +1468,7 @@
},
{
"BriefDescription": "TOR Occupancy; Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
"PerPkg": "1",
@@ -1332,6 +1478,7 @@
},
{
"BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
"Filter": "filter_opc=0x182",
@@ -1342,6 +1489,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -1351,6 +1499,7 @@
},
{
"BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
"PerPkg": "1",
@@ -1360,6 +1509,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss All",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
"PerPkg": "1",
@@ -1369,6 +1519,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
"PerPkg": "1",
@@ -1378,6 +1529,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
"PerPkg": "1",
@@ -1387,6 +1539,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
@@ -1396,6 +1549,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
"PerPkg": "1",
@@ -1405,6 +1559,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
"PerPkg": "1",
@@ -1414,6 +1569,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
"PerPkg": "1",
@@ -1423,6 +1579,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
"PerPkg": "1",
@@ -1432,6 +1589,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
"PerPkg": "1",
@@ -1441,6 +1599,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -1450,6 +1609,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
"PerPkg": "1",
@@ -1459,6 +1619,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
"PerPkg": "1",
@@ -1468,6 +1629,7 @@
},
{
"BriefDescription": "TOR Occupancy; Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
"PerPkg": "1",
@@ -1477,6 +1639,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -1486,6 +1649,7 @@
},
{
"BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
"PerPkg": "1",
@@ -1495,6 +1659,7 @@
},
{
"BriefDescription": "TOR Occupancy; Writebacks",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.WB",
"PerPkg": "1",
@@ -1504,6 +1669,7 @@
},
{
"BriefDescription": "Onto AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.AD",
"PerPkg": "1",
@@ -1512,6 +1678,7 @@
},
{
"BriefDescription": "Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.AK",
"PerPkg": "1",
@@ -1520,6 +1687,7 @@
},
{
"BriefDescription": "Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.BL",
"PerPkg": "1",
@@ -1528,6 +1696,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
"PerPkg": "1",
@@ -1537,6 +1706,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CORE",
"PerPkg": "1",
@@ -1546,6 +1716,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
"PerPkg": "1",
@@ -1555,6 +1726,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CORE",
"PerPkg": "1",
@@ -1564,6 +1736,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
"PerPkg": "1",
@@ -1573,6 +1746,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CORE",
"PerPkg": "1",
@@ -1582,6 +1756,7 @@
},
{
"BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
"PerPkg": "1",
@@ -1591,6 +1766,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AD_CORE",
"PerPkg": "1",
@@ -1600,6 +1776,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AK_BOTH",
"PerPkg": "1",
@@ -1609,6 +1786,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.BL_BOTH",
"PerPkg": "1",
@@ -1618,6 +1796,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.IV",
"PerPkg": "1",
@@ -1627,6 +1806,7 @@
},
{
"BriefDescription": "BT Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_H_BT_CYCLES_NE",
"PerPkg": "1",
@@ -1635,6 +1815,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
"PerPkg": "1",
@@ -1644,6 +1825,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
"PerPkg": "1",
@@ -1653,6 +1835,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
"PerPkg": "1",
@@ -1662,6 +1845,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
"PerPkg": "1",
@@ -1671,6 +1855,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
"PerPkg": "1",
@@ -1680,6 +1865,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.TAKEN",
"PerPkg": "1",
@@ -1689,6 +1875,7 @@
},
{
"BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_H_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
@@ -1696,6 +1883,7 @@
},
{
"BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_H_DIRECT2CORE_COUNT",
"PerPkg": "1",
@@ -1704,6 +1892,7 @@
},
{
"BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
"PerPkg": "1",
@@ -1712,6 +1901,7 @@
},
{
"BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
"PerPkg": "1",
@@ -1720,6 +1910,7 @@
},
{
"BriefDescription": "Directory Lat Opt Return",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_H_DIRECTORY_LAT_OPT",
"PerPkg": "1",
@@ -1728,6 +1919,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
"PerPkg": "1",
@@ -1737,6 +1929,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
"PerPkg": "1",
@@ -1746,6 +1939,7 @@
},
{
"BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -1755,6 +1949,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
"PerPkg": "1",
@@ -1764,6 +1959,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.SET",
"PerPkg": "1",
@@ -1773,6 +1969,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1781,6 +1978,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ALL",
"PerPkg": "1",
@@ -1789,6 +1987,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ALLOCS",
"PerPkg": "1",
@@ -1797,6 +1996,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.EVICTS",
"PerPkg": "1",
@@ -1805,6 +2005,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.HOM",
"PerPkg": "1",
@@ -1813,6 +2014,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.INVALS",
"PerPkg": "1",
@@ -1821,6 +2023,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
"PerPkg": "1",
@@ -1829,6 +2032,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSP",
"PerPkg": "1",
@@ -1837,6 +2041,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -1845,6 +2050,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -1853,6 +2059,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDS",
"PerPkg": "1",
@@ -1861,6 +2068,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
"PerPkg": "1",
@@ -1869,6 +2077,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.WBMTOI",
"PerPkg": "1",
@@ -1877,6 +2086,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1885,6 +2095,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
"PerPkg": "1",
@@ -1893,6 +2104,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
"PerPkg": "1",
@@ -1901,6 +2113,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
"PerPkg": "1",
@@ -1909,6 +2122,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
"PerPkg": "1",
@@ -1917,6 +2131,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -1925,6 +2140,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -1933,6 +2149,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
"PerPkg": "1",
@@ -1941,6 +2158,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
"PerPkg": "1",
@@ -1949,6 +2167,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
"PerPkg": "1",
@@ -1957,6 +2176,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1965,6 +2185,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ALL",
"PerPkg": "1",
@@ -1973,6 +2194,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
"PerPkg": "1",
@@ -1981,6 +2203,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.HOM",
"PerPkg": "1",
@@ -1989,6 +2212,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.INVALS",
"PerPkg": "1",
@@ -1997,6 +2221,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
"PerPkg": "1",
@@ -2005,6 +2230,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSP",
"PerPkg": "1",
@@ -2013,6 +2239,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -2021,6 +2248,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -2029,6 +2257,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
"PerPkg": "1",
@@ -2037,6 +2266,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
"PerPkg": "1",
@@ -2045,6 +2275,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
"PerPkg": "1",
@@ -2053,6 +2284,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
"PerPkg": "1",
@@ -2062,6 +2294,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
"PerPkg": "1",
@@ -2071,6 +2304,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
"PerPkg": "1",
@@ -2080,6 +2314,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
"PerPkg": "1",
@@ -2089,6 +2324,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
"PerPkg": "1",
@@ -2098,6 +2334,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
"PerPkg": "1",
@@ -2107,6 +2344,7 @@
},
{
"BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_H_IMC_READS.NORMAL",
"PerPkg": "1",
@@ -2116,6 +2354,7 @@
},
{
"BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_H_IMC_RETRY",
"PerPkg": "1",
@@ -2123,6 +2362,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -2132,6 +2372,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.FULL",
"PerPkg": "1",
@@ -2141,6 +2382,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
"PerPkg": "1",
@@ -2150,6 +2392,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -2159,6 +2402,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
"PerPkg": "1",
@@ -2168,6 +2412,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
"PerPkg": "1",
@@ -2176,6 +2421,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
"PerPkg": "1",
@@ -2184,6 +2430,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x64",
"EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
"PerPkg": "1",
@@ -2193,6 +2440,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x64",
"EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
"PerPkg": "1",
@@ -2202,6 +2450,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0x65",
"EventName": "UNC_H_IOT_CTS_HI.CTS2",
"PerPkg": "1",
@@ -2211,6 +2460,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0x65",
"EventName": "UNC_H_IOT_CTS_HI.CTS3",
"PerPkg": "1",
@@ -2220,6 +2470,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
"PerPkg": "1",
@@ -2229,6 +2480,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
"PerPkg": "1",
@@ -2238,6 +2490,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.CANCELLED",
"PerPkg": "1",
@@ -2247,6 +2500,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2256,6 +2510,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.READS_LOCAL",
"PerPkg": "1",
@@ -2265,6 +2520,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
"PerPkg": "1",
@@ -2274,6 +2530,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.REMOTE",
"PerPkg": "1",
@@ -2283,6 +2540,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.REMOTE_USEFUL",
"PerPkg": "1",
@@ -2292,6 +2550,7 @@
},
{
"BriefDescription": "OSB Early Data Return; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.ALL",
"PerPkg": "1",
@@ -2301,6 +2560,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
"PerPkg": "1",
@@ -2310,6 +2570,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
"PerPkg": "1",
@@ -2319,6 +2580,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
"PerPkg": "1",
@@ -2328,6 +2590,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
"PerPkg": "1",
@@ -2337,6 +2600,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2346,6 +2610,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -2355,6 +2620,7 @@
},
{
"BriefDescription": "Read and Write Requests; Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
@@ -2364,6 +2630,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -2373,6 +2640,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -2382,6 +2650,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
@@ -2391,6 +2660,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -2400,6 +2670,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -2409,6 +2680,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -2418,6 +2690,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2427,6 +2700,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -2436,6 +2710,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW",
"PerPkg": "1",
@@ -2445,6 +2720,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -2454,6 +2730,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -2463,6 +2740,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -2472,6 +2750,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -2481,6 +2760,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2490,6 +2770,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -2499,6 +2780,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW",
"PerPkg": "1",
@@ -2508,6 +2790,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -2517,6 +2800,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -2526,6 +2810,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -2535,6 +2820,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -2544,6 +2830,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2553,6 +2840,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -2562,6 +2850,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW",
"PerPkg": "1",
@@ -2571,6 +2860,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -2580,6 +2870,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -2589,6 +2880,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -2598,6 +2890,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -2607,6 +2900,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -2616,6 +2910,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -2625,6 +2920,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -2634,6 +2930,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -2643,6 +2940,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -2652,6 +2950,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
@@ -2661,6 +2960,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2670,6 +2970,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2679,6 +2980,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2688,6 +2990,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2697,6 +3000,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2706,6 +3010,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2715,6 +3020,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2724,6 +3030,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2733,6 +3040,7 @@
},
{
"BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
"PerPkg": "1",
@@ -2742,6 +3050,7 @@
},
{
"BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
"PerPkg": "1",
@@ -2751,6 +3060,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -2760,6 +3070,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
"PerPkg": "1",
@@ -2769,6 +3080,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
"PerPkg": "1",
@@ -2778,6 +3090,7 @@
},
{
"BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -2787,6 +3100,7 @@
},
{
"BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -2796,6 +3110,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
"PerPkg": "1",
@@ -2805,6 +3120,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
@@ -2814,6 +3130,7 @@
},
{
"BriefDescription": "M line forwarded from remote cache with no writeback to memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
@@ -2824,6 +3141,7 @@
},
{
"BriefDescription": "Shared line response from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
@@ -2834,6 +3152,7 @@
},
{
"BriefDescription": "Shared line forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
@@ -2844,6 +3163,7 @@
},
{
"BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
@@ -2854,6 +3174,7 @@
},
{
"BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSP_WB",
"PerPkg": "1",
@@ -2863,6 +3184,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Other",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
"PerPkg": "1",
@@ -2872,6 +3194,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
"PerPkg": "1",
@@ -2881,6 +3204,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
"PerPkg": "1",
@@ -2890,6 +3214,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
"PerPkg": "1",
@@ -2899,6 +3224,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
"PerPkg": "1",
@@ -2908,6 +3234,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
"PerPkg": "1",
@@ -2917,6 +3244,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
"PerPkg": "1",
@@ -2926,6 +3254,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
"PerPkg": "1",
@@ -2935,6 +3264,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
@@ -2944,6 +3274,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
@@ -2953,6 +3284,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
@@ -2962,6 +3294,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
@@ -2971,6 +3304,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
"PerPkg": "1",
@@ -2980,6 +3314,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
"PerPkg": "1",
@@ -2989,6 +3324,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
"PerPkg": "1",
@@ -2998,6 +3334,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
"PerPkg": "1",
@@ -3007,6 +3344,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
"PerPkg": "1",
@@ -3016,6 +3354,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
"PerPkg": "1",
@@ -3025,6 +3364,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
"PerPkg": "1",
@@ -3034,6 +3374,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
"PerPkg": "1",
@@ -3043,6 +3384,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
"PerPkg": "1",
@@ -3052,6 +3394,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
"PerPkg": "1",
@@ -3061,6 +3404,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
"PerPkg": "1",
@@ -3070,6 +3414,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
"PerPkg": "1",
@@ -3079,6 +3424,7 @@
},
{
"BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3088,6 +3434,7 @@
},
{
"BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
"PerPkg": "1",
@@ -3097,6 +3444,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3106,6 +3454,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
"PerPkg": "1",
@@ -3115,6 +3464,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
"PerPkg": "1",
@@ -3124,6 +3474,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local InvItoE Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
"PerPkg": "1",
@@ -3133,6 +3484,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote InvItoE Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
"PerPkg": "1",
@@ -3142,6 +3494,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local Read Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
"PerPkg": "1",
@@ -3151,6 +3504,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote Read Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
"PerPkg": "1",
@@ -3160,6 +3514,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
"PerPkg": "1",
@@ -3169,6 +3524,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
"PerPkg": "1",
@@ -3178,6 +3534,7 @@
},
{
"BriefDescription": "Data Pending Occupancy Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -3187,6 +3544,7 @@
},
{
"BriefDescription": "Data Pending Occupancy Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -3196,6 +3554,7 @@
},
{
"BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_H_TxR_AD.HOM",
"PerPkg": "1",
@@ -3205,6 +3564,7 @@
},
{
"BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3214,6 +3574,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3223,6 +3584,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3232,6 +3594,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3241,6 +3604,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3250,6 +3614,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3259,6 +3624,7 @@
},
{
"BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.ALL",
"PerPkg": "1",
@@ -3268,6 +3634,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3277,6 +3644,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3286,6 +3654,7 @@
},
{
"BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3295,6 +3664,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3304,6 +3674,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3313,6 +3684,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3322,6 +3694,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3331,6 +3704,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3340,6 +3714,7 @@
},
{
"BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.ALL",
"PerPkg": "1",
@@ -3349,6 +3724,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3358,6 +3734,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3367,6 +3744,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CACHE",
"PerPkg": "1",
@@ -3376,6 +3754,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CORE",
"PerPkg": "1",
@@ -3385,6 +3764,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_QPI",
"PerPkg": "1",
@@ -3394,6 +3774,7 @@
},
{
"BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3403,6 +3784,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3412,6 +3794,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3421,6 +3804,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3430,6 +3814,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3439,6 +3824,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3448,6 +3834,7 @@
},
{
"BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.ALL",
"PerPkg": "1",
@@ -3457,6 +3844,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3466,6 +3854,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3475,6 +3864,7 @@
},
{
"BriefDescription": "Injection Starvation; For AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_H_TxR_STARVED.AK",
"PerPkg": "1",
@@ -3484,6 +3874,7 @@
},
{
"BriefDescription": "Injection Starvation; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_H_TxR_STARVED.BL",
"PerPkg": "1",
@@ -3493,6 +3884,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -3502,6 +3894,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -3511,6 +3904,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -3520,6 +3914,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -3529,6 +3924,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -3538,6 +3934,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -3547,6 +3944,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -3556,6 +3954,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
index b9fb216bee16..765d44012bba 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
+ "Counter": "0,1,2,3",
"EventName": "QPI_CTL_BANDWIDTH_TX",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of data flits transmitted . Derived from unc_q_txl_flits_g0.data",
+ "Counter": "0,1,2,3",
"EventName": "QPI_DATA_BANDWIDTH_TX",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Number of clocks in the IRP.",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
"PerPkg": "1",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.CRD",
"PerPkg": "1",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.DRD",
"PerPkg": "1",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
"PerPkg": "1",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
"PerPkg": "1",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIItoM",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCITOM",
"PerPkg": "1",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "Coherent Ops; RFO",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.RFO",
"PerPkg": "1",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Coherent Ops; WbMtoI",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
"PerPkg": "1",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
"PerPkg": "1",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
"PerPkg": "1",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
"PerPkg": "1",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_REJ",
"PerPkg": "1",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_REQ",
"PerPkg": "1",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_XFER",
"PerPkg": "1",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
"PerPkg": "1",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.PF_TIMEOUT",
"PerPkg": "1",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.DATA_THROTTLE",
"PerPkg": "1",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "Misc Events - Set 1",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.LOST_FWD",
"PerPkg": "1",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
"PerPkg": "1",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
"PerPkg": "1",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_E",
"PerPkg": "1",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_I",
"PerPkg": "1",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_M",
"PerPkg": "1",
@@ -251,6 +279,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_S",
"PerPkg": "1",
@@ -260,6 +289,7 @@
},
{
"BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
"EventCode": "0xA",
"EventName": "UNC_I_RxR_AK_INSERTS",
"PerPkg": "1",
@@ -268,6 +298,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
@@ -276,6 +307,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
@@ -284,6 +316,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
@@ -292,6 +325,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
@@ -300,6 +334,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
@@ -308,6 +343,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
@@ -316,6 +352,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
@@ -324,6 +361,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
@@ -332,6 +370,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
@@ -340,6 +379,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
"PerPkg": "1",
@@ -349,6 +389,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
"PerPkg": "1",
@@ -358,6 +399,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
"PerPkg": "1",
@@ -367,6 +409,7 @@
},
{
"BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.MISS",
"PerPkg": "1",
@@ -376,6 +419,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
"PerPkg": "1",
@@ -385,6 +429,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
"PerPkg": "1",
@@ -394,6 +439,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
"PerPkg": "1",
@@ -403,6 +449,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.ATOMIC",
"PerPkg": "1",
@@ -412,6 +459,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.OTHER",
"PerPkg": "1",
@@ -421,6 +469,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.RD_PREF",
"PerPkg": "1",
@@ -430,6 +479,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.READS",
"PerPkg": "1",
@@ -439,6 +489,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
"PerPkg": "1",
@@ -448,6 +499,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -457,6 +509,7 @@
},
{
"BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -465,6 +518,7 @@
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -473,6 +527,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xE",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
"PerPkg": "1",
@@ -481,6 +536,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
"PerPkg": "1",
@@ -489,6 +545,7 @@
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0xD",
"EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
"PerPkg": "1",
@@ -497,6 +554,7 @@
},
{
"BriefDescription": "Number of qfclks",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
@@ -505,6 +563,7 @@
},
{
"BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_CTO_COUNT",
"PerPkg": "1",
@@ -513,6 +572,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
"PerPkg": "1",
@@ -522,6 +582,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
"PerPkg": "1",
@@ -531,6 +592,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
"PerPkg": "1",
@@ -540,6 +602,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
"PerPkg": "1",
@@ -549,6 +612,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
"PerPkg": "1",
@@ -558,6 +622,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
"PerPkg": "1",
@@ -567,6 +632,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
"PerPkg": "1",
@@ -576,6 +642,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
"PerPkg": "1",
@@ -585,6 +652,7 @@
},
{
"BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -593,6 +661,7 @@
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -601,6 +670,7 @@
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -609,6 +679,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_BYPASSED",
"PerPkg": "1",
@@ -617,6 +688,7 @@
},
{
"BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
"PerPkg": "1",
@@ -626,6 +698,7 @@
},
{
"BriefDescription": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
"PerPkg": "1",
@@ -634,6 +707,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
"PerPkg": "1",
@@ -643,6 +717,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
"PerPkg": "1",
@@ -652,6 +727,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
"PerPkg": "1",
@@ -661,6 +737,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
"PerPkg": "1",
@@ -670,6 +747,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
"PerPkg": "1",
@@ -679,6 +757,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
"PerPkg": "1",
@@ -688,6 +767,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
"PerPkg": "1",
@@ -697,6 +777,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
"PerPkg": "1",
@@ -706,6 +787,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
"PerPkg": "1",
@@ -715,6 +797,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
"PerPkg": "1",
@@ -724,6 +807,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
"PerPkg": "1",
@@ -733,6 +817,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
"PerPkg": "1",
@@ -742,6 +827,7 @@
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
"PerPkg": "1",
@@ -750,6 +836,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_CYCLES_NE",
"PerPkg": "1",
@@ -758,6 +845,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
"PerPkg": "1",
@@ -767,6 +855,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
"PerPkg": "1",
@@ -776,6 +865,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
"PerPkg": "1",
@@ -785,6 +875,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
"PerPkg": "1",
@@ -794,6 +885,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
"PerPkg": "1",
@@ -803,6 +895,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
"PerPkg": "1",
@@ -812,6 +905,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
"PerPkg": "1",
@@ -821,6 +915,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
"PerPkg": "1",
@@ -830,6 +925,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
"PerPkg": "1",
@@ -839,6 +935,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
"PerPkg": "1",
@@ -848,6 +945,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
"PerPkg": "1",
@@ -857,6 +955,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
"PerPkg": "1",
@@ -866,6 +965,7 @@
},
{
"BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
"PerPkg": "1",
@@ -875,6 +975,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS",
"PerPkg": "1",
@@ -884,6 +985,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
@@ -893,6 +995,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
@@ -902,6 +1005,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM",
"PerPkg": "1",
@@ -911,6 +1015,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
@@ -920,6 +1025,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
@@ -929,6 +1035,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.SNP",
"PerPkg": "1",
@@ -938,6 +1045,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB",
"PerPkg": "1",
@@ -947,6 +1055,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
@@ -956,6 +1065,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
@@ -965,6 +1075,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCS",
"PerPkg": "1",
@@ -974,6 +1085,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
@@ -983,6 +1095,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
@@ -992,6 +1105,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_Q_RxL_INSERTS",
"PerPkg": "1",
@@ -1000,6 +1114,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
"PerPkg": "1",
@@ -1009,6 +1124,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
"PerPkg": "1",
@@ -1018,6 +1134,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
"PerPkg": "1",
@@ -1027,6 +1144,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
"PerPkg": "1",
@@ -1036,6 +1154,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
"PerPkg": "1",
@@ -1045,6 +1164,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
"PerPkg": "1",
@@ -1054,6 +1174,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
"PerPkg": "1",
@@ -1063,6 +1184,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
"PerPkg": "1",
@@ -1072,6 +1194,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
"PerPkg": "1",
@@ -1081,6 +1204,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
"PerPkg": "1",
@@ -1090,6 +1214,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
"PerPkg": "1",
@@ -1099,6 +1224,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
"PerPkg": "1",
@@ -1108,6 +1234,7 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_OCCUPANCY",
"PerPkg": "1",
@@ -1116,6 +1243,7 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
"PerPkg": "1",
@@ -1125,6 +1253,7 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
"PerPkg": "1",
@@ -1134,6 +1263,7 @@
},
{
"BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
"PerPkg": "1",
@@ -1143,6 +1273,7 @@
},
{
"BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
"PerPkg": "1",
@@ -1152,6 +1283,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
"PerPkg": "1",
@@ -1161,6 +1293,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
"PerPkg": "1",
@@ -1170,6 +1303,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
"PerPkg": "1",
@@ -1179,6 +1313,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
"PerPkg": "1",
@@ -1188,6 +1323,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
"PerPkg": "1",
@@ -1197,6 +1333,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
"PerPkg": "1",
@@ -1206,6 +1343,7 @@
},
{
"BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
"PerPkg": "1",
@@ -1215,6 +1353,7 @@
},
{
"BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
"PerPkg": "1",
@@ -1224,6 +1363,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
"PerPkg": "1",
@@ -1233,6 +1373,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
"PerPkg": "1",
@@ -1242,6 +1383,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
"PerPkg": "1",
@@ -1251,6 +1393,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
"PerPkg": "1",
@@ -1260,6 +1403,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
"PerPkg": "1",
@@ -1269,6 +1413,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
"PerPkg": "1",
@@ -1278,6 +1423,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
"PerPkg": "1",
@@ -1287,6 +1433,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.GV",
"PerPkg": "1",
@@ -1296,6 +1443,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
"PerPkg": "1",
@@ -1305,6 +1453,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
"PerPkg": "1",
@@ -1314,6 +1463,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
"PerPkg": "1",
@@ -1323,6 +1473,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
"PerPkg": "1",
@@ -1332,6 +1483,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
"PerPkg": "1",
@@ -1341,6 +1493,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
"PerPkg": "1",
@@ -1350,6 +1503,7 @@
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -1358,6 +1512,7 @@
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_TxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -1366,6 +1521,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_Q_TxL_BYPASSED",
"PerPkg": "1",
@@ -1374,6 +1530,7 @@
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
"PerPkg": "1",
@@ -1383,6 +1540,7 @@
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
"PerPkg": "1",
@@ -1392,6 +1550,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_Q_TxL_CYCLES_NE",
"PerPkg": "1",
@@ -1400,6 +1559,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
@@ -1408,6 +1568,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
@@ -1416,6 +1577,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency.",
@@ -1424,6 +1586,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
@@ -1432,6 +1595,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
@@ -1440,6 +1604,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits transmitted over QPI on the home channel.",
@@ -1448,6 +1613,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits transmitted over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
@@ -1456,6 +1622,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request transmitted over QPI on the home channel. This basically counts the number of remote memory requests transmitted over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
@@ -1464,6 +1631,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits transmitted over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are transmitted on the home channel.",
@@ -1472,6 +1640,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB",
"PerPkg": "1",
@@ -1481,6 +1650,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
@@ -1490,6 +1660,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
@@ -1499,6 +1670,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCS",
"PerPkg": "1",
@@ -1508,6 +1680,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
@@ -1517,6 +1690,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
@@ -1526,6 +1700,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_Q_TxL_INSERTS",
"PerPkg": "1",
@@ -1534,6 +1709,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_Q_TxL_OCCUPANCY",
"PerPkg": "1",
@@ -1542,6 +1718,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1551,6 +1728,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1560,6 +1738,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1569,6 +1748,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1578,6 +1758,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1587,6 +1768,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1596,6 +1778,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1605,6 +1788,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1614,6 +1798,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1623,6 +1808,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1632,6 +1818,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1641,6 +1828,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1650,6 +1838,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
"PerPkg": "1",
@@ -1658,6 +1847,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
"PerPkg": "1",
@@ -1666,6 +1856,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1675,6 +1866,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1684,6 +1876,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
"PerPkg": "1",
@@ -1693,6 +1886,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1702,6 +1896,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1711,6 +1906,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
"PerPkg": "1",
@@ -1720,6 +1916,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1729,6 +1926,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1738,6 +1936,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1747,6 +1946,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1756,6 +1956,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1765,6 +1966,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1774,6 +1976,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1783,6 +1986,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1792,6 +1996,7 @@
},
{
"BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_Q_VNA_CREDIT_RETURNS",
"PerPkg": "1",
@@ -1800,6 +2005,7 @@
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
"PerPkg": "1",
@@ -1808,6 +2014,7 @@
},
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
"EventCode": "0x1",
"EventName": "UNC_R3_CLOCKTICKS",
"PerPkg": "1",
@@ -1816,6 +2023,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
"PerPkg": "1",
@@ -1825,6 +2033,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
"PerPkg": "1",
@@ -1834,6 +2043,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
"PerPkg": "1",
@@ -1843,6 +2053,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
"PerPkg": "1",
@@ -1852,6 +2063,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14_16",
"PerPkg": "1",
@@ -1861,6 +2073,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
"PerPkg": "1",
@@ -1870,6 +2083,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
"PerPkg": "1",
@@ -1879,6 +2093,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO_15_17",
"PerPkg": "1",
@@ -1888,6 +2103,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
"PerPkg": "1",
@@ -1897,6 +2113,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
"PerPkg": "1",
@@ -1906,6 +2123,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
"PerPkg": "1",
@@ -1915,6 +2133,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
"PerPkg": "1",
@@ -1924,6 +2143,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
"PerPkg": "1",
@@ -1933,6 +2153,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
"PerPkg": "1",
@@ -1942,6 +2163,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
"PerPkg": "1",
@@ -1951,6 +2173,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
"PerPkg": "1",
@@ -1960,6 +2183,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
"PerPkg": "1",
@@ -1969,6 +2193,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
"PerPkg": "1",
@@ -1978,6 +2203,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
"PerPkg": "1",
@@ -1987,6 +2213,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
"PerPkg": "1",
@@ -1996,6 +2223,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0xB",
"EventName": "UNC_R3_IOT_BACKPRESSURE.HUB",
"PerPkg": "1",
@@ -2004,6 +2232,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0xB",
"EventName": "UNC_R3_IOT_BACKPRESSURE.SAT",
"PerPkg": "1",
@@ -2012,6 +2241,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0xD",
"EventName": "UNC_R3_IOT_CTS_HI.CTS2",
"PerPkg": "1",
@@ -2021,6 +2251,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0xD",
"EventName": "UNC_R3_IOT_CTS_HI.CTS3",
"PerPkg": "1",
@@ -2030,6 +2261,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0xC",
"EventName": "UNC_R3_IOT_CTS_LO.CTS0",
"PerPkg": "1",
@@ -2039,6 +2271,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0xC",
"EventName": "UNC_R3_IOT_CTS_LO.CTS1",
"PerPkg": "1",
@@ -2048,6 +2281,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
@@ -2057,6 +2291,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
@@ -2066,6 +2301,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
@@ -2075,6 +2311,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2084,6 +2321,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2093,6 +2331,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2102,6 +2341,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2111,6 +2351,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2120,6 +2361,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2129,6 +2371,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2138,6 +2381,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2147,6 +2391,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2156,6 +2401,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2165,6 +2411,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2174,6 +2421,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2183,6 +2431,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
@@ -2192,6 +2441,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
@@ -2201,6 +2451,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
@@ -2210,6 +2461,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2219,6 +2471,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2228,6 +2481,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2237,6 +2491,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2246,6 +2501,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; All",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.ALL",
"PerPkg": "1",
@@ -2255,6 +2511,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -2264,6 +2521,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2273,6 +2531,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -2282,6 +2541,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW",
"PerPkg": "1",
@@ -2291,6 +2551,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -2300,6 +2561,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -2309,6 +2571,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; All",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -2318,6 +2581,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -2327,6 +2591,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2336,6 +2601,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -2345,6 +2611,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW",
"PerPkg": "1",
@@ -2354,6 +2621,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -2363,6 +2631,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -2372,6 +2641,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; All",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -2381,6 +2651,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -2390,6 +2661,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2399,6 +2671,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -2408,6 +2681,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW",
"PerPkg": "1",
@@ -2417,6 +2691,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -2426,6 +2701,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -2435,6 +2711,7 @@
},
{
"BriefDescription": "R3 IV Ring in Use; Any",
+ "Counter": "0,1,2",
"EventCode": "0xA",
"EventName": "UNC_R3_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -2444,6 +2721,7 @@
},
{
"BriefDescription": "R3 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0xA",
"EventName": "UNC_R3_RING_IV_USED.CW",
"PerPkg": "1",
@@ -2453,6 +2731,7 @@
},
{
"BriefDescription": "Ring Stop Starved; AK",
+ "Counter": "0,1,2",
"EventCode": "0xE",
"EventName": "UNC_R3_RING_SINK_STARVED.AK",
"PerPkg": "1",
@@ -2462,6 +2741,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
"PerPkg": "1",
@@ -2471,6 +2751,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
"PerPkg": "1",
@@ -2480,6 +2761,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
"PerPkg": "1",
@@ -2489,6 +2771,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.DRS",
"PerPkg": "1",
@@ -2498,6 +2781,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.HOM",
"PerPkg": "1",
@@ -2507,6 +2791,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCB",
"PerPkg": "1",
@@ -2516,6 +2801,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCS",
"PerPkg": "1",
@@ -2525,6 +2811,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NDR",
"PerPkg": "1",
@@ -2534,6 +2821,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.SNP",
"PerPkg": "1",
@@ -2543,6 +2831,7 @@
},
{
"BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.DRS",
"PerPkg": "1",
@@ -2552,6 +2841,7 @@
},
{
"BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.HOM",
"PerPkg": "1",
@@ -2561,6 +2851,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCB",
"PerPkg": "1",
@@ -2570,6 +2861,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCS",
"PerPkg": "1",
@@ -2579,6 +2871,7 @@
},
{
"BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NDR",
"PerPkg": "1",
@@ -2588,6 +2881,7 @@
},
{
"BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.SNP",
"PerPkg": "1",
@@ -2597,6 +2891,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; DRS",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.DRS",
"PerPkg": "1",
@@ -2606,6 +2901,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; HOM",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.HOM",
"PerPkg": "1",
@@ -2615,6 +2911,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NCB",
"PerPkg": "1",
@@ -2624,6 +2921,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NCS",
"PerPkg": "1",
@@ -2633,6 +2931,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; NDR",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NDR",
"PerPkg": "1",
@@ -2642,6 +2941,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; SNP",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.SNP",
"PerPkg": "1",
@@ -2651,6 +2951,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; DRS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.DRS",
"PerPkg": "1",
@@ -2660,6 +2961,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; HOM",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.HOM",
"PerPkg": "1",
@@ -2669,6 +2971,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NCB",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCB",
"PerPkg": "1",
@@ -2678,6 +2981,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NCS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCS",
"PerPkg": "1",
@@ -2687,6 +2991,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NDR",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NDR",
"PerPkg": "1",
@@ -2696,6 +3001,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; SNP",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.SNP",
"PerPkg": "1",
@@ -2705,6 +3011,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2714,6 +3021,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2723,6 +3031,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2732,6 +3041,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2741,6 +3051,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2750,6 +3061,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2759,6 +3071,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x2B",
"EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2768,6 +3081,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x2B",
"EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2777,6 +3091,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
@@ -2786,6 +3101,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
@@ -2795,6 +3111,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
@@ -2804,6 +3121,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
@@ -2813,6 +3131,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_AD",
"PerPkg": "1",
@@ -2822,6 +3141,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_AK",
"PerPkg": "1",
@@ -2831,6 +3151,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_BL",
"PerPkg": "1",
@@ -2840,6 +3161,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_AD",
"PerPkg": "1",
@@ -2849,6 +3171,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_AK",
"PerPkg": "1",
@@ -2858,6 +3181,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_BL",
"PerPkg": "1",
@@ -2867,6 +3191,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -2876,6 +3201,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -2885,6 +3211,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -2894,6 +3221,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -2903,6 +3231,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -2912,6 +3241,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -2921,6 +3251,7 @@
},
{
"BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -2930,6 +3261,7 @@
},
{
"BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
"PerPkg": "1",
@@ -2939,6 +3271,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -2948,6 +3281,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -2957,6 +3291,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
"PerPkg": "1",
@@ -2966,6 +3301,7 @@
},
{
"BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
"PerPkg": "1",
@@ -2975,6 +3311,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -2984,6 +3321,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -2993,6 +3331,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -3002,6 +3341,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -3011,6 +3351,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -3020,6 +3361,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -3029,6 +3371,7 @@
},
{
"BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -3038,6 +3381,7 @@
},
{
"BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
"PerPkg": "1",
@@ -3047,6 +3391,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -3056,6 +3401,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -3065,6 +3411,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
"PerPkg": "1",
@@ -3074,6 +3421,7 @@
},
{
"BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
"PerPkg": "1",
@@ -3083,6 +3431,7 @@
},
{
"BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -3092,6 +3441,7 @@
},
{
"BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -3101,6 +3451,7 @@
},
{
"BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -3110,6 +3461,7 @@
},
{
"BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -3119,6 +3471,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -3128,6 +3481,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -3137,6 +3491,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -3146,6 +3501,7 @@
},
{
"BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -3155,6 +3511,7 @@
},
{
"BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_S_BOUNCE_CONTROL",
"PerPkg": "1",
@@ -3162,12 +3519,14 @@
},
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_S_CLOCKTICKS",
"PerPkg": "1",
"Unit": "SBOX"
},
{
"BriefDescription": "FaST wire asserted",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_S_FAST_ASSERTED",
"PerPkg": "1",
@@ -3176,6 +3535,7 @@
},
{
"BriefDescription": "AD Ring In Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.ALL",
"PerPkg": "1",
@@ -3185,6 +3545,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN",
"PerPkg": "1",
@@ -3194,6 +3555,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Event",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -3203,6 +3565,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN_ODD",
"PerPkg": "1",
@@ -3212,6 +3575,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP",
"PerPkg": "1",
@@ -3221,6 +3585,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP_EVEN",
"PerPkg": "1",
@@ -3230,6 +3595,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP_ODD",
"PerPkg": "1",
@@ -3239,6 +3605,7 @@
},
{
"BriefDescription": "AK Ring In Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -3248,6 +3615,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN",
"PerPkg": "1",
@@ -3257,6 +3625,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Event",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -3266,6 +3635,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN_ODD",
"PerPkg": "1",
@@ -3275,6 +3645,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP",
"PerPkg": "1",
@@ -3284,6 +3655,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP_EVEN",
"PerPkg": "1",
@@ -3293,6 +3665,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP_ODD",
"PerPkg": "1",
@@ -3302,6 +3675,7 @@
},
{
"BriefDescription": "BL Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -3311,6 +3685,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN",
"PerPkg": "1",
@@ -3320,6 +3695,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Event",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -3329,6 +3705,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN_ODD",
"PerPkg": "1",
@@ -3338,6 +3715,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP",
"PerPkg": "1",
@@ -3347,6 +3725,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP_EVEN",
"PerPkg": "1",
@@ -3356,6 +3735,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP_ODD",
"PerPkg": "1",
@@ -3365,6 +3745,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.AD_CACHE",
"PerPkg": "1",
@@ -3373,6 +3754,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.AK_CORE",
"PerPkg": "1",
@@ -3381,6 +3763,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.BL_CORE",
"PerPkg": "1",
@@ -3389,6 +3772,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.IV_CORE",
"PerPkg": "1",
@@ -3397,6 +3781,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_S_RING_IV_USED.DN",
"PerPkg": "1",
@@ -3406,6 +3791,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_S_RING_IV_USED.UP",
"PerPkg": "1",
@@ -3415,6 +3801,7 @@
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.AD_CACHE",
"PerPkg": "1",
@@ -3423,6 +3810,7 @@
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.AK_CORE",
"PerPkg": "1",
@@ -3431,6 +3819,7 @@
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.BL_CORE",
"PerPkg": "1",
@@ -3439,6 +3828,7 @@
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.IV_CORE",
"PerPkg": "1",
@@ -3447,6 +3837,7 @@
},
{
"BriefDescription": "Injection Starvation; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.AD_BNC",
"PerPkg": "1",
@@ -3456,6 +3847,7 @@
},
{
"BriefDescription": "Injection Starvation; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.AD_CRD",
"PerPkg": "1",
@@ -3465,6 +3857,7 @@
},
{
"BriefDescription": "Injection Starvation; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.BL_BNC",
"PerPkg": "1",
@@ -3474,6 +3867,7 @@
},
{
"BriefDescription": "Injection Starvation; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.BL_CRD",
"PerPkg": "1",
@@ -3483,6 +3877,7 @@
},
{
"BriefDescription": "Bypass; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AD_BNC",
"PerPkg": "1",
@@ -3492,6 +3887,7 @@
},
{
"BriefDescription": "Bypass; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AD_CRD",
"PerPkg": "1",
@@ -3501,6 +3897,7 @@
},
{
"BriefDescription": "Bypass; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AK",
"PerPkg": "1",
@@ -3510,6 +3907,7 @@
},
{
"BriefDescription": "Bypass; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.BL_BNC",
"PerPkg": "1",
@@ -3519,6 +3917,7 @@
},
{
"BriefDescription": "Bypass; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.BL_CRD",
"PerPkg": "1",
@@ -3528,6 +3927,7 @@
},
{
"BriefDescription": "Bypass; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.IV",
"PerPkg": "1",
@@ -3537,6 +3937,7 @@
},
{
"BriefDescription": "Injection Starvation; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AD_BNC",
"PerPkg": "1",
@@ -3546,6 +3947,7 @@
},
{
"BriefDescription": "Injection Starvation; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AD_CRD",
"PerPkg": "1",
@@ -3555,6 +3957,7 @@
},
{
"BriefDescription": "Injection Starvation; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AK",
"PerPkg": "1",
@@ -3564,6 +3967,7 @@
},
{
"BriefDescription": "Injection Starvation; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.BL_BNC",
"PerPkg": "1",
@@ -3573,6 +3977,7 @@
},
{
"BriefDescription": "Injection Starvation; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.BL_CRD",
"PerPkg": "1",
@@ -3582,6 +3987,7 @@
},
{
"BriefDescription": "Injection Starvation; IVF Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.IFV",
"PerPkg": "1",
@@ -3591,6 +3997,7 @@
},
{
"BriefDescription": "Injection Starvation; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.IV",
"PerPkg": "1",
@@ -3600,6 +4007,7 @@
},
{
"BriefDescription": "Ingress Allocations; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AD_BNC",
"PerPkg": "1",
@@ -3609,6 +4017,7 @@
},
{
"BriefDescription": "Ingress Allocations; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AD_CRD",
"PerPkg": "1",
@@ -3618,6 +4027,7 @@
},
{
"BriefDescription": "Ingress Allocations; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AK",
"PerPkg": "1",
@@ -3627,6 +4037,7 @@
},
{
"BriefDescription": "Ingress Allocations; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.BL_BNC",
"PerPkg": "1",
@@ -3636,6 +4047,7 @@
},
{
"BriefDescription": "Ingress Allocations; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.BL_CRD",
"PerPkg": "1",
@@ -3645,6 +4057,7 @@
},
{
"BriefDescription": "Ingress Allocations; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.IV",
"PerPkg": "1",
@@ -3654,6 +4067,7 @@
},
{
"BriefDescription": "Ingress Occupancy; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AD_BNC",
"PerPkg": "1",
@@ -3663,6 +4077,7 @@
},
{
"BriefDescription": "Ingress Occupancy; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AD_CRD",
"PerPkg": "1",
@@ -3672,6 +4087,7 @@
},
{
"BriefDescription": "Ingress Occupancy; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AK",
"PerPkg": "1",
@@ -3681,6 +4097,7 @@
},
{
"BriefDescription": "Ingress Occupancy; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.BL_BNC",
"PerPkg": "1",
@@ -3690,6 +4107,7 @@
},
{
"BriefDescription": "Ingress Occupancy; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.BL_CRD",
"PerPkg": "1",
@@ -3699,6 +4117,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.IV",
"PerPkg": "1",
@@ -3708,6 +4127,7 @@
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.AD",
"PerPkg": "1",
@@ -3716,6 +4136,7 @@
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.AK",
"PerPkg": "1",
@@ -3724,6 +4145,7 @@
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.BL",
"PerPkg": "1",
@@ -3732,6 +4154,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AD_BNC",
"PerPkg": "1",
@@ -3741,6 +4164,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AD_CRD",
"PerPkg": "1",
@@ -3750,6 +4174,7 @@
},
{
"BriefDescription": "Egress Allocations; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AK",
"PerPkg": "1",
@@ -3759,6 +4184,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.BL_BNC",
"PerPkg": "1",
@@ -3768,6 +4194,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.BL_CRD",
"PerPkg": "1",
@@ -3777,6 +4204,7 @@
},
{
"BriefDescription": "Egress Allocations; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.IV",
"PerPkg": "1",
@@ -3786,6 +4214,7 @@
},
{
"BriefDescription": "Egress Occupancy; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AD_BNC",
"PerPkg": "1",
@@ -3795,6 +4224,7 @@
},
{
"BriefDescription": "Egress Occupancy; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AD_CRD",
"PerPkg": "1",
@@ -3804,6 +4234,7 @@
},
{
"BriefDescription": "Egress Occupancy; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AK",
"PerPkg": "1",
@@ -3813,6 +4244,7 @@
},
{
"BriefDescription": "Egress Occupancy; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.BL_BNC",
"PerPkg": "1",
@@ -3822,6 +4254,7 @@
},
{
"BriefDescription": "Egress Occupancy; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.BL_CRD",
"PerPkg": "1",
@@ -3831,6 +4264,7 @@
},
{
"BriefDescription": "Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.IV",
"PerPkg": "1",
@@ -3840,6 +4274,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.AD",
"PerPkg": "1",
@@ -3849,6 +4284,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.AK",
"PerPkg": "1",
@@ -3858,6 +4294,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.BL",
"PerPkg": "1",
@@ -3867,6 +4304,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.IV",
"PerPkg": "1",
@@ -3876,6 +4314,7 @@
},
{
"BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_U_CLOCKTICKS",
"PerPkg": "1",
@@ -3883,6 +4322,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
"PerPkg": "1",
@@ -3892,6 +4332,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.DISABLE",
"PerPkg": "1",
@@ -3901,6 +4342,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.ENABLE",
"PerPkg": "1",
@@ -3910,6 +4352,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
"PerPkg": "1",
@@ -3919,6 +4362,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
"PerPkg": "1",
@@ -3928,6 +4372,7 @@
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
"PerPkg": "1",
@@ -3937,6 +4382,7 @@
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
"PerPkg": "1",
@@ -3945,6 +4391,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.CMC",
"PerPkg": "1",
@@ -3954,6 +4401,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
"PerPkg": "1",
@@ -3963,6 +4411,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LTERROR",
"PerPkg": "1",
@@ -3972,6 +4421,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
"PerPkg": "1",
@@ -3981,6 +4431,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
"PerPkg": "1",
@@ -3990,6 +4441,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.OTHER",
"PerPkg": "1",
@@ -3999,6 +4451,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.TRAP",
"PerPkg": "1",
@@ -4008,6 +4461,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.UMC",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json
index 01e04daf03da..daef7accdbcb 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_R2_CLOCKTICKS",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
"PerPkg": "1",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
"PerPkg": "1",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
"PerPkg": "1",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -77,6 +86,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -86,6 +96,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -95,6 +106,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.ALL",
"PerPkg": "1",
@@ -104,6 +116,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -113,6 +126,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -122,6 +136,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -131,6 +146,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW",
"PerPkg": "1",
@@ -140,6 +156,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -149,6 +166,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -158,6 +176,7 @@
},
{
"BriefDescription": "AK Ingress Bounced; Dn",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_R2_RING_AK_BOUNCES.DN",
"PerPkg": "1",
@@ -167,6 +186,7 @@
},
{
"BriefDescription": "AK Ingress Bounced; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_R2_RING_AK_BOUNCES.UP",
"PerPkg": "1",
@@ -176,6 +196,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -185,6 +206,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -194,6 +216,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -203,6 +226,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -212,6 +236,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW",
"PerPkg": "1",
@@ -221,6 +246,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -230,6 +256,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -239,6 +266,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -248,6 +276,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -257,6 +286,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -266,6 +296,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -275,6 +306,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW",
"PerPkg": "1",
@@ -284,6 +316,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -293,6 +326,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -302,6 +336,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -311,6 +346,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.CCW",
"PerPkg": "1",
@@ -320,6 +356,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.CW",
"PerPkg": "1",
@@ -329,6 +366,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
"PerPkg": "1",
@@ -338,6 +376,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
"PerPkg": "1",
@@ -347,6 +386,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R2_RxR_INSERTS.NCB",
"PerPkg": "1",
@@ -356,6 +396,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R2_RxR_INSERTS.NCS",
"PerPkg": "1",
@@ -365,6 +406,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
"PerPkg": "1",
@@ -374,6 +416,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -383,6 +426,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -392,6 +436,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -401,6 +446,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -410,6 +456,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
@@ -419,6 +466,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
@@ -428,6 +476,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
@@ -437,6 +486,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
@@ -446,6 +496,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AD",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
"PerPkg": "1",
@@ -455,6 +506,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AK",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
"PerPkg": "1",
@@ -464,6 +516,7 @@
},
{
"BriefDescription": "Egress Cycles Full; BL",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
"PerPkg": "1",
@@ -473,6 +526,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AD",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AD",
"PerPkg": "1",
@@ -482,6 +536,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AK",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AK",
"PerPkg": "1",
@@ -491,6 +546,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; BL",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.BL",
"PerPkg": "1",
@@ -500,6 +556,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
"PerPkg": "1",
@@ -509,6 +566,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
"PerPkg": "1",
@@ -518,6 +576,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
"PerPkg": "1",
@@ -527,6 +586,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
"PerPkg": "1",
@@ -536,6 +596,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
"PerPkg": "1",
@@ -545,6 +606,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
index b5a33e7a68c6..45555316f8ea 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
@@ -21,6 +23,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.BYP",
"PerPkg": "1",
@@ -30,6 +33,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
"PerPkg": "1",
@@ -39,6 +43,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.WR",
"PerPkg": "1",
@@ -48,6 +53,7 @@
},
{
"BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.ACT",
"PerPkg": "1",
@@ -56,6 +62,7 @@
},
{
"BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.CAS",
"PerPkg": "1",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.PRE",
"PerPkg": "1",
@@ -72,6 +80,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -90,6 +100,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
@@ -99,6 +110,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_RMM",
"PerPkg": "1",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_WMM",
"PerPkg": "1",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
"PerPkg": "1",
@@ -142,6 +158,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
@@ -151,6 +168,7 @@
},
{
"BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
@@ -158,18 +176,21 @@
},
{
"BriefDescription": "Clockticks in the Memory Controller using one of the programmable counters",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS_P",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_CLOCKTICKS_P",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_DCLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
"PerPkg": "1",
@@ -178,6 +199,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
@@ -196,6 +219,7 @@
},
{
"BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
@@ -204,6 +228,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
"PerPkg": "1",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
"PerPkg": "1",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
@@ -231,6 +258,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
"PerPkg": "1",
@@ -240,6 +268,7 @@
},
{
"BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
"PerPkg": "1",
@@ -248,6 +277,7 @@
},
{
"BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"PerPkg": "1",
@@ -256,6 +286,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
"PerPkg": "1",
@@ -265,6 +296,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
"PerPkg": "1",
@@ -274,6 +306,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
"PerPkg": "1",
@@ -283,6 +316,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
"PerPkg": "1",
@@ -292,6 +326,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
"PerPkg": "1",
@@ -301,6 +336,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
"PerPkg": "1",
@@ -310,6 +346,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
"PerPkg": "1",
@@ -319,6 +356,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
"PerPkg": "1",
@@ -328,6 +366,7 @@
},
{
"BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
"PerPkg": "1",
@@ -336,6 +375,7 @@
},
{
"BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M_POWER_PCU_THROTTLING",
"PerPkg": "1",
@@ -343,6 +383,7 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"PerPkg": "1",
@@ -351,6 +392,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
"PerPkg": "1",
@@ -360,6 +402,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
"PerPkg": "1",
@@ -369,6 +412,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
"PerPkg": "1",
@@ -378,6 +422,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
"PerPkg": "1",
@@ -387,6 +432,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
"PerPkg": "1",
@@ -396,6 +442,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
"PerPkg": "1",
@@ -405,6 +452,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
"PerPkg": "1",
@@ -414,6 +462,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
"PerPkg": "1",
@@ -423,6 +472,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
"PerPkg": "1",
@@ -432,6 +482,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
"PerPkg": "1",
@@ -441,6 +492,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.BYP",
"PerPkg": "1",
@@ -450,6 +502,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
"PerPkg": "1",
@@ -459,6 +512,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
@@ -468,6 +522,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -477,6 +532,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
@@ -486,6 +542,7 @@
},
{
"BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.HIGH",
"PerPkg": "1",
@@ -494,6 +551,7 @@
},
{
"BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.LOW",
"PerPkg": "1",
@@ -502,6 +560,7 @@
},
{
"BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.MED",
"PerPkg": "1",
@@ -510,6 +569,7 @@
},
{
"BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.PANIC",
"PerPkg": "1",
@@ -518,6 +578,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
"PerPkg": "1",
@@ -527,6 +588,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK0",
"PerPkg": "1",
@@ -535,6 +597,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK1",
"PerPkg": "1",
@@ -544,6 +607,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK10",
"PerPkg": "1",
@@ -553,6 +617,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK11",
"PerPkg": "1",
@@ -562,6 +627,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK12",
"PerPkg": "1",
@@ -571,6 +637,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK13",
"PerPkg": "1",
@@ -580,6 +647,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK14",
"PerPkg": "1",
@@ -589,6 +657,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK15",
"PerPkg": "1",
@@ -598,6 +667,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK2",
"PerPkg": "1",
@@ -607,6 +677,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK3",
"PerPkg": "1",
@@ -616,6 +687,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK4",
"PerPkg": "1",
@@ -625,6 +697,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK5",
"PerPkg": "1",
@@ -634,6 +707,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK6",
"PerPkg": "1",
@@ -643,6 +717,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK7",
"PerPkg": "1",
@@ -652,6 +727,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK8",
"PerPkg": "1",
@@ -661,6 +737,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK9",
"PerPkg": "1",
@@ -670,6 +747,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
"PerPkg": "1",
@@ -679,6 +757,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
"PerPkg": "1",
@@ -688,6 +767,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
"PerPkg": "1",
@@ -697,6 +777,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
"PerPkg": "1",
@@ -706,6 +787,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
"PerPkg": "1",
@@ -715,6 +797,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK0",
"PerPkg": "1",
@@ -723,6 +806,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK1",
"PerPkg": "1",
@@ -732,6 +816,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK10",
"PerPkg": "1",
@@ -741,6 +826,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK11",
"PerPkg": "1",
@@ -750,6 +836,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK12",
"PerPkg": "1",
@@ -759,6 +846,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK13",
"PerPkg": "1",
@@ -768,6 +856,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK14",
"PerPkg": "1",
@@ -777,6 +866,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK15",
"PerPkg": "1",
@@ -786,6 +876,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK2",
"PerPkg": "1",
@@ -795,6 +886,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK3",
"PerPkg": "1",
@@ -804,6 +896,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK4",
"PerPkg": "1",
@@ -813,6 +906,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK5",
"PerPkg": "1",
@@ -822,6 +916,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK6",
"PerPkg": "1",
@@ -831,6 +926,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK7",
"PerPkg": "1",
@@ -840,6 +936,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK8",
"PerPkg": "1",
@@ -849,6 +946,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK9",
"PerPkg": "1",
@@ -858,6 +956,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
"PerPkg": "1",
@@ -867,6 +966,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
"PerPkg": "1",
@@ -876,6 +976,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
"PerPkg": "1",
@@ -885,6 +986,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
"PerPkg": "1",
@@ -894,6 +996,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK0",
"PerPkg": "1",
@@ -902,6 +1005,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
"PerPkg": "1",
@@ -911,6 +1015,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK0",
"PerPkg": "1",
@@ -919,6 +1024,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK1",
"PerPkg": "1",
@@ -928,6 +1034,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK10",
"PerPkg": "1",
@@ -937,6 +1044,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK11",
"PerPkg": "1",
@@ -946,6 +1054,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK12",
"PerPkg": "1",
@@ -955,6 +1064,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK13",
"PerPkg": "1",
@@ -964,6 +1074,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK14",
"PerPkg": "1",
@@ -973,6 +1084,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK15",
"PerPkg": "1",
@@ -982,6 +1094,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK2",
"PerPkg": "1",
@@ -991,6 +1104,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK3",
"PerPkg": "1",
@@ -1000,6 +1114,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK4",
"PerPkg": "1",
@@ -1009,6 +1124,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK5",
"PerPkg": "1",
@@ -1018,6 +1134,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK6",
"PerPkg": "1",
@@ -1027,6 +1144,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK7",
"PerPkg": "1",
@@ -1036,6 +1154,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK8",
"PerPkg": "1",
@@ -1045,6 +1164,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK9",
"PerPkg": "1",
@@ -1054,6 +1174,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
"PerPkg": "1",
@@ -1063,6 +1184,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
"PerPkg": "1",
@@ -1072,6 +1194,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
"PerPkg": "1",
@@ -1081,6 +1204,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
"PerPkg": "1",
@@ -1090,6 +1214,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
"PerPkg": "1",
@@ -1099,6 +1224,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK0",
"PerPkg": "1",
@@ -1107,6 +1233,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK1",
"PerPkg": "1",
@@ -1116,6 +1243,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK10",
"PerPkg": "1",
@@ -1125,6 +1253,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK11",
"PerPkg": "1",
@@ -1134,6 +1263,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK12",
"PerPkg": "1",
@@ -1143,6 +1273,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK13",
"PerPkg": "1",
@@ -1152,6 +1283,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK14",
"PerPkg": "1",
@@ -1161,6 +1293,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK15",
"PerPkg": "1",
@@ -1170,6 +1303,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK2",
"PerPkg": "1",
@@ -1179,6 +1313,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK3",
"PerPkg": "1",
@@ -1188,6 +1323,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK4",
"PerPkg": "1",
@@ -1197,6 +1333,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK5",
"PerPkg": "1",
@@ -1206,6 +1343,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK6",
"PerPkg": "1",
@@ -1215,6 +1353,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK7",
"PerPkg": "1",
@@ -1224,6 +1363,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK8",
"PerPkg": "1",
@@ -1233,6 +1373,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK9",
"PerPkg": "1",
@@ -1242,6 +1383,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
"PerPkg": "1",
@@ -1251,6 +1393,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
"PerPkg": "1",
@@ -1260,6 +1403,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
"PerPkg": "1",
@@ -1269,6 +1413,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
"PerPkg": "1",
@@ -1278,6 +1423,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
"PerPkg": "1",
@@ -1287,6 +1433,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK0",
"PerPkg": "1",
@@ -1295,6 +1442,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK1",
"PerPkg": "1",
@@ -1304,6 +1452,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK10",
"PerPkg": "1",
@@ -1313,6 +1462,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK11",
"PerPkg": "1",
@@ -1322,6 +1472,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK12",
"PerPkg": "1",
@@ -1331,6 +1482,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK13",
"PerPkg": "1",
@@ -1340,6 +1492,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK14",
"PerPkg": "1",
@@ -1349,6 +1502,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK15",
"PerPkg": "1",
@@ -1358,6 +1512,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK2",
"PerPkg": "1",
@@ -1367,6 +1522,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK3",
"PerPkg": "1",
@@ -1376,6 +1532,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK4",
"PerPkg": "1",
@@ -1385,6 +1542,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK5",
"PerPkg": "1",
@@ -1394,6 +1552,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK6",
"PerPkg": "1",
@@ -1403,6 +1562,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK7",
"PerPkg": "1",
@@ -1412,6 +1572,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK8",
"PerPkg": "1",
@@ -1421,6 +1582,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK9",
"PerPkg": "1",
@@ -1430,6 +1592,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
"PerPkg": "1",
@@ -1439,6 +1602,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
"PerPkg": "1",
@@ -1448,6 +1612,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
"PerPkg": "1",
@@ -1457,6 +1622,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
"PerPkg": "1",
@@ -1466,6 +1632,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
"PerPkg": "1",
@@ -1475,6 +1642,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK0",
"PerPkg": "1",
@@ -1483,6 +1651,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK1",
"PerPkg": "1",
@@ -1492,6 +1661,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK10",
"PerPkg": "1",
@@ -1501,6 +1671,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK11",
"PerPkg": "1",
@@ -1510,6 +1681,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK12",
"PerPkg": "1",
@@ -1519,6 +1691,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK13",
"PerPkg": "1",
@@ -1528,6 +1701,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK14",
"PerPkg": "1",
@@ -1537,6 +1711,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK15",
"PerPkg": "1",
@@ -1546,6 +1721,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK2",
"PerPkg": "1",
@@ -1555,6 +1731,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK3",
"PerPkg": "1",
@@ -1564,6 +1741,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK4",
"PerPkg": "1",
@@ -1573,6 +1751,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK5",
"PerPkg": "1",
@@ -1582,6 +1761,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK6",
"PerPkg": "1",
@@ -1591,6 +1771,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK7",
"PerPkg": "1",
@@ -1600,6 +1781,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK8",
"PerPkg": "1",
@@ -1609,6 +1791,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK9",
"PerPkg": "1",
@@ -1618,6 +1801,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
"PerPkg": "1",
@@ -1627,6 +1811,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
"PerPkg": "1",
@@ -1636,6 +1821,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
"PerPkg": "1",
@@ -1645,6 +1831,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
"PerPkg": "1",
@@ -1654,6 +1841,7 @@
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
"PerPkg": "1",
@@ -1662,6 +1850,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -1670,6 +1859,7 @@
},
{
"BriefDescription": "VMSE MXB write buffer occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
"PerPkg": "1",
@@ -1677,6 +1867,7 @@
},
{
"BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M_VMSE_WR_PUSH.RMM",
"PerPkg": "1",
@@ -1685,6 +1876,7 @@
},
{
"BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M_VMSE_WR_PUSH.WMM",
"PerPkg": "1",
@@ -1693,6 +1885,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
"PerPkg": "1",
@@ -1701,6 +1894,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.STARVE",
"PerPkg": "1",
@@ -1709,6 +1903,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
"PerPkg": "1",
@@ -1717,6 +1912,7 @@
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
"PerPkg": "1",
@@ -1725,6 +1921,7 @@
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
"PerPkg": "1",
@@ -1733,6 +1930,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
"PerPkg": "1",
@@ -1741,6 +1939,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
"PerPkg": "1",
@@ -1749,6 +1948,7 @@
},
{
"BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_M_WRONG_MM",
"PerPkg": "1",
@@ -1756,6 +1956,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
"PerPkg": "1",
@@ -1765,6 +1966,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK0",
"PerPkg": "1",
@@ -1773,6 +1975,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK1",
"PerPkg": "1",
@@ -1782,6 +1985,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK10",
"PerPkg": "1",
@@ -1791,6 +1995,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK11",
"PerPkg": "1",
@@ -1800,6 +2005,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK12",
"PerPkg": "1",
@@ -1809,6 +2015,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK13",
"PerPkg": "1",
@@ -1818,6 +2025,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK14",
"PerPkg": "1",
@@ -1827,6 +2035,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK15",
"PerPkg": "1",
@@ -1836,6 +2045,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK2",
"PerPkg": "1",
@@ -1845,6 +2055,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK3",
"PerPkg": "1",
@@ -1854,6 +2065,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK4",
"PerPkg": "1",
@@ -1863,6 +2075,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK5",
"PerPkg": "1",
@@ -1872,6 +2085,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK6",
"PerPkg": "1",
@@ -1881,6 +2095,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK7",
"PerPkg": "1",
@@ -1890,6 +2105,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK8",
"PerPkg": "1",
@@ -1899,6 +2115,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK9",
"PerPkg": "1",
@@ -1908,6 +2125,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
"PerPkg": "1",
@@ -1917,6 +2135,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
"PerPkg": "1",
@@ -1926,6 +2145,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
"PerPkg": "1",
@@ -1935,6 +2155,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
"PerPkg": "1",
@@ -1944,6 +2165,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
"PerPkg": "1",
@@ -1953,6 +2175,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK0",
"PerPkg": "1",
@@ -1961,6 +2184,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK1",
"PerPkg": "1",
@@ -1970,6 +2194,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK10",
"PerPkg": "1",
@@ -1979,6 +2204,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK11",
"PerPkg": "1",
@@ -1988,6 +2214,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK12",
"PerPkg": "1",
@@ -1997,6 +2224,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK13",
"PerPkg": "1",
@@ -2006,6 +2234,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK14",
"PerPkg": "1",
@@ -2015,6 +2244,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK15",
"PerPkg": "1",
@@ -2024,6 +2254,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK2",
"PerPkg": "1",
@@ -2033,6 +2264,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK3",
"PerPkg": "1",
@@ -2042,6 +2274,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK4",
"PerPkg": "1",
@@ -2051,6 +2284,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK5",
"PerPkg": "1",
@@ -2060,6 +2294,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK6",
"PerPkg": "1",
@@ -2069,6 +2304,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK7",
"PerPkg": "1",
@@ -2078,6 +2314,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK8",
"PerPkg": "1",
@@ -2087,6 +2324,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK9",
"PerPkg": "1",
@@ -2096,6 +2334,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
"PerPkg": "1",
@@ -2105,6 +2344,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
"PerPkg": "1",
@@ -2114,6 +2354,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
"PerPkg": "1",
@@ -2123,6 +2364,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
"PerPkg": "1",
@@ -2132,6 +2374,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
"PerPkg": "1",
@@ -2141,6 +2384,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK0",
"PerPkg": "1",
@@ -2149,6 +2393,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK1",
"PerPkg": "1",
@@ -2158,6 +2403,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK10",
"PerPkg": "1",
@@ -2167,6 +2413,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK11",
"PerPkg": "1",
@@ -2176,6 +2423,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK12",
"PerPkg": "1",
@@ -2185,6 +2433,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK13",
"PerPkg": "1",
@@ -2194,6 +2443,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK14",
"PerPkg": "1",
@@ -2203,6 +2453,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK15",
"PerPkg": "1",
@@ -2212,6 +2463,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK2",
"PerPkg": "1",
@@ -2221,6 +2473,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK3",
"PerPkg": "1",
@@ -2230,6 +2483,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK4",
"PerPkg": "1",
@@ -2239,6 +2493,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK5",
"PerPkg": "1",
@@ -2248,6 +2503,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK6",
"PerPkg": "1",
@@ -2257,6 +2513,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK7",
"PerPkg": "1",
@@ -2266,6 +2523,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK8",
"PerPkg": "1",
@@ -2275,6 +2533,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK9",
"PerPkg": "1",
@@ -2284,6 +2543,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
"PerPkg": "1",
@@ -2293,6 +2553,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
"PerPkg": "1",
@@ -2302,6 +2563,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
"PerPkg": "1",
@@ -2311,6 +2573,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
"PerPkg": "1",
@@ -2320,6 +2583,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
"PerPkg": "1",
@@ -2329,6 +2593,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK0",
"PerPkg": "1",
@@ -2337,6 +2602,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK1",
"PerPkg": "1",
@@ -2346,6 +2612,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK10",
"PerPkg": "1",
@@ -2355,6 +2622,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK11",
"PerPkg": "1",
@@ -2364,6 +2632,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK12",
"PerPkg": "1",
@@ -2373,6 +2642,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK13",
"PerPkg": "1",
@@ -2382,6 +2652,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK14",
"PerPkg": "1",
@@ -2391,6 +2662,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK15",
"PerPkg": "1",
@@ -2400,6 +2672,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK2",
"PerPkg": "1",
@@ -2409,6 +2682,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK3",
"PerPkg": "1",
@@ -2418,6 +2692,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK4",
"PerPkg": "1",
@@ -2427,6 +2702,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK5",
"PerPkg": "1",
@@ -2436,6 +2712,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK6",
"PerPkg": "1",
@@ -2445,6 +2722,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK7",
"PerPkg": "1",
@@ -2454,6 +2732,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK8",
"PerPkg": "1",
@@ -2463,6 +2742,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK9",
"PerPkg": "1",
@@ -2472,6 +2752,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
"PerPkg": "1",
@@ -2481,6 +2762,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
"PerPkg": "1",
@@ -2490,6 +2772,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
"PerPkg": "1",
@@ -2499,6 +2782,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
"PerPkg": "1",
@@ -2508,6 +2792,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
"PerPkg": "1",
@@ -2517,6 +2802,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK0",
"PerPkg": "1",
@@ -2525,6 +2811,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK1",
"PerPkg": "1",
@@ -2534,6 +2821,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK10",
"PerPkg": "1",
@@ -2543,6 +2831,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK11",
"PerPkg": "1",
@@ -2552,6 +2841,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK12",
"PerPkg": "1",
@@ -2561,6 +2851,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK13",
"PerPkg": "1",
@@ -2570,6 +2861,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK14",
"PerPkg": "1",
@@ -2579,6 +2871,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK15",
"PerPkg": "1",
@@ -2588,6 +2881,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK2",
"PerPkg": "1",
@@ -2597,6 +2891,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK3",
"PerPkg": "1",
@@ -2606,6 +2901,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK4",
"PerPkg": "1",
@@ -2615,6 +2911,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK5",
"PerPkg": "1",
@@ -2624,6 +2921,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK6",
"PerPkg": "1",
@@ -2633,6 +2931,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK7",
"PerPkg": "1",
@@ -2642,6 +2941,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK8",
"PerPkg": "1",
@@ -2651,6 +2951,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK9",
"PerPkg": "1",
@@ -2660,6 +2961,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
"PerPkg": "1",
@@ -2669,6 +2971,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
"PerPkg": "1",
@@ -2678,6 +2981,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
"PerPkg": "1",
@@ -2687,6 +2991,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
"PerPkg": "1",
@@ -2696,6 +3001,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
"PerPkg": "1",
@@ -2705,6 +3011,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK0",
"PerPkg": "1",
@@ -2713,6 +3020,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK1",
"PerPkg": "1",
@@ -2722,6 +3030,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK10",
"PerPkg": "1",
@@ -2731,6 +3040,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK11",
"PerPkg": "1",
@@ -2740,6 +3050,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK12",
"PerPkg": "1",
@@ -2749,6 +3060,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK13",
"PerPkg": "1",
@@ -2758,6 +3070,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK14",
"PerPkg": "1",
@@ -2767,6 +3080,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK15",
"PerPkg": "1",
@@ -2776,6 +3090,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK2",
"PerPkg": "1",
@@ -2785,6 +3100,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK3",
"PerPkg": "1",
@@ -2794,6 +3110,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK4",
"PerPkg": "1",
@@ -2803,6 +3120,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK5",
"PerPkg": "1",
@@ -2812,6 +3130,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK6",
"PerPkg": "1",
@@ -2821,6 +3140,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK7",
"PerPkg": "1",
@@ -2830,6 +3150,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK8",
"PerPkg": "1",
@@ -2839,6 +3160,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK9",
"PerPkg": "1",
@@ -2848,6 +3170,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
"PerPkg": "1",
@@ -2857,6 +3180,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
"PerPkg": "1",
@@ -2866,6 +3190,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
"PerPkg": "1",
@@ -2875,6 +3200,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
index 320aaab53a0b..afdc636b9855 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -88,6 +99,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -144,6 +162,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -152,6 +171,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
@@ -160,6 +180,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
@@ -168,6 +189,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_P_DEMOTIONS_CORE10",
"PerPkg": "1",
@@ -176,6 +198,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3B",
"EventName": "UNC_P_DEMOTIONS_CORE11",
"PerPkg": "1",
@@ -184,6 +207,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_P_DEMOTIONS_CORE12",
"PerPkg": "1",
@@ -192,6 +216,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_P_DEMOTIONS_CORE13",
"PerPkg": "1",
@@ -200,6 +225,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_P_DEMOTIONS_CORE14",
"PerPkg": "1",
@@ -208,6 +234,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_P_DEMOTIONS_CORE15",
"PerPkg": "1",
@@ -216,6 +243,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_P_DEMOTIONS_CORE16",
"PerPkg": "1",
@@ -224,6 +252,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_P_DEMOTIONS_CORE17",
"PerPkg": "1",
@@ -232,6 +261,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
@@ -240,6 +270,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_P_DEMOTIONS_CORE3",
"PerPkg": "1",
@@ -248,6 +279,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_P_DEMOTIONS_CORE4",
"PerPkg": "1",
@@ -256,6 +288,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_P_DEMOTIONS_CORE5",
"PerPkg": "1",
@@ -264,6 +297,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_P_DEMOTIONS_CORE6",
"PerPkg": "1",
@@ -272,6 +306,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_P_DEMOTIONS_CORE7",
"PerPkg": "1",
@@ -280,6 +315,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_P_DEMOTIONS_CORE8",
"PerPkg": "1",
@@ -288,6 +324,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_P_DEMOTIONS_CORE9",
"PerPkg": "1",
@@ -296,6 +333,7 @@
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
"PerPkg": "1",
@@ -304,6 +342,7 @@
},
{
"BriefDescription": "OS Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
"PerPkg": "1",
@@ -312,6 +351,7 @@
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
"PerPkg": "1",
@@ -320,6 +360,7 @@
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
"PerPkg": "1",
@@ -328,6 +369,7 @@
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
"PerPkg": "1",
@@ -336,6 +378,7 @@
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
@@ -344,6 +387,7 @@
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
"PerPkg": "1",
@@ -352,6 +396,7 @@
},
{
"BriefDescription": "Package C State Residency - C1E",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
"PerPkg": "1",
@@ -360,6 +405,7 @@
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
"PerPkg": "1",
@@ -368,6 +414,7 @@
},
{
"BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
"PerPkg": "1",
@@ -376,6 +423,7 @@
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
"PerPkg": "1",
@@ -384,6 +432,7 @@
},
{
"BriefDescription": "Package C7 State Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
"PerPkg": "1",
@@ -392,6 +441,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"Filter": "occ_sel=1",
@@ -401,6 +451,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"Filter": "occ_sel=2",
@@ -410,6 +461,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"Filter": "occ_sel=3",
@@ -419,6 +471,7 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"PerPkg": "1",
@@ -427,6 +480,7 @@
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
"PerPkg": "1",
@@ -435,6 +489,7 @@
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -443,6 +498,7 @@
},
{
"BriefDescription": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
"PerPkg": "1",
@@ -451,6 +507,7 @@
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
index 93621e004d88..eb1d9541e26c 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
"SampleAfterValue": "2000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
"SampleAfterValue": "2000003",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
@@ -149,6 +167,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
@@ -157,6 +176,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
@@ -165,6 +185,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
@@ -174,6 +195,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -181,6 +203,7 @@
},
{
"BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
@@ -188,6 +211,7 @@
},
{
"BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
@@ -195,6 +219,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
@@ -203,6 +228,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
@@ -212,6 +238,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
@@ -221,6 +248,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
@@ -230,6 +258,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"Errata": "BDM69",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
@@ -239,6 +268,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L1",
@@ -247,6 +277,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L2",
@@ -255,6 +286,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L3",
@@ -263,6 +295,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
@@ -271,6 +304,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L1",
@@ -279,6 +313,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L2",
@@ -287,6 +322,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
"Errata": "BDM69, BDM98",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L3",
@@ -295,6 +331,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -303,6 +340,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
index a842f05cb60d..8bad700ff8ea 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L1D miss outstandings duration in cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -35,6 +39,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.SILENT",
"SampleAfterValue": "200003",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.USELESS_HWPF",
"SampleAfterValue": "200003",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.USELESS_PREF",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Demand requests that miss L2 cache.",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PublicDescription": "Demand requests to L2 cache.",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "All requests that miss L2 cache.",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_HIT",
"PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_MISS",
"PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "All L2 requests.",
@@ -193,6 +217,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -201,6 +226,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -209,6 +235,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "Counts L2 writebacks that access L2 cache.",
@@ -217,6 +244,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
"Errata": "SKL057",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
@@ -226,6 +254,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
"Errata": "SKL057",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
@@ -235,6 +264,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -245,6 +275,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -255,6 +286,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -265,6 +297,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -274,6 +307,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -284,6 +318,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -304,6 +340,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -314,6 +351,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
@@ -324,6 +362,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
@@ -334,6 +373,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -343,6 +383,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -353,6 +394,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
@@ -363,6 +405,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
@@ -372,6 +415,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
@@ -381,6 +425,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources was remote HITM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
@@ -391,6 +436,7 @@
},
{
"BriefDescription": "Retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
@@ -401,6 +447,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -410,6 +457,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -420,6 +468,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -430,6 +479,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -440,6 +490,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -450,6 +501,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -460,6 +512,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -470,6 +523,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -480,6 +534,7 @@
},
{
"BriefDescription": "Retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
@@ -490,6 +545,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -499,6 +555,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -508,6 +565,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -517,6 +575,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -526,6 +585,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -535,6 +595,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -544,6 +605,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -553,6 +615,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -562,6 +625,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -571,6 +635,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -580,6 +645,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -589,6 +655,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -598,6 +665,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -607,6 +675,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -616,6 +685,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -625,6 +695,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -634,6 +705,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -643,6 +715,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -652,6 +725,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -661,6 +735,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -670,6 +745,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -679,6 +755,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -688,6 +765,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -697,6 +775,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -706,6 +785,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -715,6 +795,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -724,6 +805,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -733,6 +815,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -742,6 +825,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -751,6 +835,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -760,6 +845,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -769,6 +855,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -778,6 +865,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -787,6 +875,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -796,6 +885,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -805,6 +895,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -814,6 +905,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -823,6 +915,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -832,6 +925,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -841,6 +935,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -850,6 +945,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -859,6 +955,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -868,6 +965,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -877,6 +975,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -886,6 +985,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -895,6 +995,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -904,6 +1005,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -913,6 +1015,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -922,6 +1025,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -931,6 +1035,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -940,6 +1045,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -949,6 +1055,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -958,6 +1065,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -967,6 +1075,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -976,6 +1085,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -985,6 +1095,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -994,6 +1105,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1003,6 +1115,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1012,6 +1125,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1021,6 +1135,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1030,6 +1145,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1039,6 +1155,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1048,6 +1165,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1057,6 +1175,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1066,6 +1185,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1075,6 +1195,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1084,6 +1205,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1093,6 +1215,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1102,6 +1225,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1111,6 +1235,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1120,6 +1245,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1129,6 +1255,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1138,6 +1265,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1147,6 +1275,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1156,6 +1285,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1165,6 +1295,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1174,6 +1305,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1183,6 +1315,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1192,6 +1325,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1201,6 +1335,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1210,6 +1345,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1219,6 +1355,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1228,6 +1365,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1237,6 +1375,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1246,6 +1385,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1255,6 +1395,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1264,6 +1405,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1273,6 +1415,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1282,6 +1425,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1291,6 +1435,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1300,6 +1445,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1309,6 +1455,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1318,6 +1465,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1327,6 +1475,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1336,6 +1485,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1345,6 +1495,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1354,6 +1505,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1363,6 +1515,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1372,6 +1525,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1381,6 +1535,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1390,6 +1545,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1399,6 +1555,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1408,6 +1565,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1417,6 +1575,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1426,6 +1585,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1435,6 +1595,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1444,6 +1605,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1453,6 +1615,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1462,6 +1625,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT.ANY_SNOOP OCR.ALL_READS.L3_HIT.ANY_SNOOP OCR.ALL_READS.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1471,6 +1635,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1480,6 +1645,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1489,6 +1655,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1498,6 +1665,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1507,6 +1675,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1516,6 +1685,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT.SNOOP_MISS OCR.ALL_READS.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1525,6 +1695,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT.SNOOP_NONE OCR.ALL_READS.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1534,6 +1705,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_E.ANY_SNOOP OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1543,6 +1715,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1552,6 +1725,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1561,6 +1735,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1570,6 +1745,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1579,6 +1755,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1588,6 +1765,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1597,6 +1775,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_F.ANY_SNOOP OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1606,6 +1785,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1615,6 +1795,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1624,6 +1805,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1633,6 +1815,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1642,6 +1825,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1651,6 +1835,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1660,6 +1845,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_M.ANY_SNOOP OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1669,6 +1855,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1678,6 +1865,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1687,6 +1875,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1696,6 +1885,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1705,6 +1895,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1714,6 +1905,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1723,6 +1915,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_S.ANY_SNOOP OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1732,6 +1925,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1741,6 +1935,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1750,6 +1945,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1759,6 +1955,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1768,6 +1965,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1777,6 +1975,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1786,6 +1985,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT.ANY_SNOOP OCR.ALL_RFO.L3_HIT.ANY_SNOOP OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1795,6 +1995,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1804,6 +2005,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1813,6 +2015,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1822,6 +2025,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1831,6 +2035,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1840,6 +2045,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT.SNOOP_MISS OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1849,6 +2055,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT.SNOOP_NONE OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1858,6 +2065,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1867,6 +2075,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1876,6 +2085,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1885,6 +2095,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1894,6 +2105,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1903,6 +2115,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1912,6 +2125,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1921,6 +2135,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1930,6 +2145,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1939,6 +2155,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1948,6 +2165,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1957,6 +2175,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1966,6 +2185,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1975,6 +2195,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1984,6 +2205,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1993,6 +2215,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2002,6 +2225,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2011,6 +2235,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2020,6 +2245,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2029,6 +2255,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2038,6 +2265,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2047,6 +2275,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2056,6 +2285,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2065,6 +2295,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2074,6 +2305,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2083,6 +2315,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2092,6 +2325,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2101,6 +2335,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2110,6 +2345,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2119,6 +2355,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2128,6 +2365,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2137,6 +2375,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2146,6 +2385,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2155,6 +2395,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2164,6 +2405,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2173,6 +2415,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2182,6 +2425,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2191,6 +2435,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2200,6 +2445,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2209,6 +2455,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2218,6 +2465,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2227,6 +2475,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2236,6 +2485,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2245,6 +2495,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2254,6 +2505,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2263,6 +2515,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2272,6 +2525,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2281,6 +2535,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2290,6 +2545,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2299,6 +2555,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2308,6 +2565,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2317,6 +2575,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2326,6 +2585,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2335,6 +2595,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2344,6 +2605,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2353,6 +2615,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2362,6 +2625,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2371,6 +2635,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2380,6 +2645,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2389,6 +2655,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2398,6 +2665,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2407,6 +2675,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2416,6 +2685,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2425,6 +2695,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2434,6 +2705,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2443,6 +2715,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2452,6 +2725,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2461,6 +2735,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2470,6 +2745,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2479,6 +2755,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2488,6 +2765,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2497,6 +2775,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2506,6 +2785,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2515,6 +2795,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2524,6 +2805,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2533,6 +2815,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2542,6 +2825,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2551,6 +2835,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2560,6 +2845,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2569,6 +2855,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2578,6 +2865,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2587,6 +2875,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2596,6 +2885,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2605,6 +2895,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2614,6 +2905,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2623,6 +2915,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2632,6 +2925,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2641,6 +2935,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2650,6 +2945,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2659,6 +2955,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2668,6 +2965,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2677,6 +2975,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2686,6 +2985,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2695,6 +2995,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2704,6 +3005,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2713,6 +3015,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2722,6 +3025,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2731,6 +3035,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2740,6 +3045,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2749,6 +3055,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2758,6 +3065,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2767,6 +3075,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2776,6 +3085,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2785,6 +3095,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2794,6 +3105,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2803,6 +3115,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2812,6 +3125,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2821,6 +3135,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2830,6 +3145,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2839,6 +3155,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2848,6 +3165,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2857,6 +3175,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2866,6 +3185,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2875,6 +3195,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2884,6 +3205,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2893,6 +3215,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2902,6 +3225,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2911,6 +3235,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2920,6 +3245,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2929,6 +3255,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2938,6 +3265,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2947,6 +3275,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2956,6 +3285,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2965,6 +3295,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2974,6 +3305,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2983,6 +3315,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2992,6 +3325,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3001,6 +3335,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3010,6 +3345,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3019,6 +3355,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3028,6 +3365,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3037,6 +3375,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3046,6 +3385,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3055,6 +3395,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3064,6 +3405,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3073,6 +3415,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3082,6 +3425,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.ANY_SNOOP OCR.OTHER.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3091,6 +3435,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.HITM_OTHER_CORE OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3100,6 +3445,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3109,6 +3455,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3118,6 +3465,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3127,6 +3475,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3136,6 +3485,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3145,6 +3495,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3154,6 +3505,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3163,6 +3515,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3172,6 +3525,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3181,6 +3535,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3190,6 +3545,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3199,6 +3555,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3208,6 +3565,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3217,6 +3575,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3226,6 +3585,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3235,6 +3595,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3244,6 +3605,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3253,6 +3615,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3262,6 +3625,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3271,6 +3635,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3280,6 +3645,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3289,6 +3655,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3298,6 +3665,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3307,6 +3675,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3316,6 +3685,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3325,6 +3695,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3334,6 +3705,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3343,6 +3715,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3352,6 +3725,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3361,6 +3735,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3370,6 +3745,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3379,6 +3755,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3388,6 +3765,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3397,6 +3775,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3406,6 +3785,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3415,6 +3795,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3424,6 +3805,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3433,6 +3815,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3442,6 +3825,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3451,6 +3835,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3460,6 +3845,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3469,6 +3855,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3478,6 +3865,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3487,6 +3875,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3496,6 +3885,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3505,6 +3895,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3514,6 +3905,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3523,6 +3915,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3532,6 +3925,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3541,6 +3935,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3550,6 +3945,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3559,6 +3955,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3568,6 +3965,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3577,6 +3975,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3586,6 +3985,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3595,6 +3995,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3604,6 +4005,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3613,6 +4015,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3622,6 +4025,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3631,6 +4035,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3640,6 +4045,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3649,6 +4055,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3658,6 +4065,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3667,6 +4075,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3676,6 +4085,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3685,6 +4095,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3694,6 +4105,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3703,6 +4115,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3712,6 +4125,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3721,6 +4135,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3730,6 +4145,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3739,6 +4155,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3748,6 +4165,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3757,6 +4175,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3766,6 +4185,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3775,6 +4195,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3784,6 +4205,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3793,6 +4215,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3802,6 +4225,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3811,6 +4235,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3820,6 +4245,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3829,6 +4255,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3838,6 +4265,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3847,6 +4275,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3856,6 +4285,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3865,6 +4295,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3874,6 +4305,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3883,6 +4315,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3892,6 +4325,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3901,6 +4335,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3910,6 +4345,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3919,6 +4355,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3928,6 +4365,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3937,6 +4375,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3946,6 +4385,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3955,6 +4395,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3964,6 +4405,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3973,6 +4415,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3982,6 +4425,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3991,6 +4435,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4000,6 +4445,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4009,6 +4455,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4018,6 +4465,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4027,6 +4475,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4036,6 +4485,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4045,6 +4495,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4054,6 +4505,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4063,6 +4515,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4072,6 +4525,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4081,6 +4535,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4090,6 +4545,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4099,6 +4555,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4108,6 +4565,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4117,6 +4575,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4126,6 +4585,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4135,6 +4595,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4144,6 +4605,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4153,6 +4615,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4162,6 +4625,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4171,6 +4635,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4180,6 +4645,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4189,6 +4655,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4198,6 +4665,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4207,6 +4675,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4216,6 +4685,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4225,6 +4695,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4234,6 +4705,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4243,6 +4715,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4252,6 +4725,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4261,6 +4735,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4270,6 +4745,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4279,6 +4755,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4288,6 +4765,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4297,6 +4775,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4306,6 +4785,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4315,6 +4795,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4324,6 +4805,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4333,6 +4815,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4342,6 +4825,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4351,6 +4835,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4360,6 +4845,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4369,6 +4855,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4378,6 +4865,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4387,6 +4875,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4396,6 +4885,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4405,6 +4895,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4414,6 +4905,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4423,6 +4915,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4432,6 +4925,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4441,6 +4935,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4450,6 +4945,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4459,6 +4955,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4468,6 +4965,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4477,6 +4975,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4486,6 +4985,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4495,6 +4995,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4504,6 +5005,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4513,6 +5015,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4522,6 +5025,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4531,6 +5035,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4540,6 +5045,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4549,6 +5055,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4558,6 +5065,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4567,6 +5075,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4576,6 +5085,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4585,6 +5095,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4594,6 +5105,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4603,6 +5115,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4612,6 +5125,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4621,6 +5135,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4630,6 +5145,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4639,6 +5155,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4648,6 +5165,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4657,6 +5175,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4666,6 +5185,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4675,6 +5195,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4684,6 +5205,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4693,6 +5215,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4702,6 +5225,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4711,6 +5235,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4720,6 +5245,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4729,6 +5255,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4738,6 +5265,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4747,6 +5275,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4756,6 +5285,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4765,6 +5295,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4774,6 +5305,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4783,6 +5315,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4792,6 +5325,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4801,6 +5335,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4810,6 +5345,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4819,6 +5355,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4828,6 +5365,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4837,6 +5375,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4846,6 +5385,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4855,6 +5395,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4864,6 +5405,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4873,6 +5415,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4882,6 +5425,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4891,6 +5435,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4900,6 +5445,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4909,6 +5455,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4918,6 +5465,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4927,6 +5475,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4936,6 +5485,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -4945,6 +5495,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -4954,6 +5505,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4963,6 +5515,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -4972,6 +5525,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -4981,6 +5535,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4990,6 +5545,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -4999,6 +5555,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -5008,6 +5565,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -5017,6 +5575,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -5026,6 +5585,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -5034,6 +5594,7 @@
},
{
"BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
@@ -5042,6 +5603,7 @@
},
{
"BriefDescription": "Cacheable and non-cacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
@@ -5050,6 +5612,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -5058,6 +5621,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -5066,6 +5630,7 @@
},
{
"BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
@@ -5074,6 +5639,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
@@ -5082,6 +5648,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -5091,6 +5658,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
@@ -5100,6 +5668,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -5109,6 +5678,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -5118,6 +5688,7 @@
},
{
"BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
@@ -5126,6 +5697,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
@@ -5134,6 +5706,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
@@ -5142,6 +5715,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
@@ -5150,6 +5724,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
@@ -5160,6 +5735,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
@@ -5170,6 +5746,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
@@ -5180,6 +5757,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -5190,6 +5768,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -5200,6 +5779,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
@@ -5210,6 +5790,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -5220,6 +5801,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
@@ -5230,6 +5812,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
@@ -5240,6 +5823,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
@@ -5250,6 +5834,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
@@ -5260,6 +5845,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -5270,6 +5856,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -5280,6 +5867,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -5290,6 +5878,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
@@ -5300,6 +5889,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
@@ -5310,6 +5900,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
@@ -5320,6 +5911,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
@@ -5330,6 +5922,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -5340,6 +5933,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -5350,6 +5944,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -5360,6 +5955,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
@@ -5370,6 +5966,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
@@ -5380,6 +5977,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
@@ -5390,6 +5988,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
@@ -5400,6 +5999,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -5410,6 +6010,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -5420,6 +6021,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -5430,6 +6032,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
@@ -5440,6 +6043,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
@@ -5450,6 +6054,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
@@ -5460,6 +6065,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
@@ -5470,6 +6076,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -5480,6 +6087,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -5490,6 +6098,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -5500,6 +6109,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
@@ -5510,6 +6120,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
@@ -5520,6 +6131,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -5530,6 +6142,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -5540,6 +6153,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -5550,6 +6164,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
@@ -5560,6 +6175,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -5570,6 +6186,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -5580,6 +6197,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -5590,6 +6208,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -5600,6 +6219,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
@@ -5610,6 +6230,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
@@ -5620,6 +6241,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
@@ -5630,6 +6252,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
@@ -5640,6 +6263,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
@@ -5650,6 +6274,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -5660,6 +6285,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -5670,6 +6296,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
@@ -5680,6 +6307,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -5690,6 +6318,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
@@ -5700,6 +6329,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
@@ -5710,6 +6340,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
@@ -5720,6 +6351,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
@@ -5730,6 +6362,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -5740,6 +6373,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -5750,6 +6384,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -5760,6 +6395,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
@@ -5770,6 +6406,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
@@ -5780,6 +6417,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
@@ -5790,6 +6428,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
@@ -5800,6 +6439,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -5810,6 +6450,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -5820,6 +6461,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -5830,6 +6472,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
@@ -5840,6 +6483,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
@@ -5850,6 +6494,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
@@ -5860,6 +6505,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
@@ -5870,6 +6516,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -5880,6 +6527,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -5890,6 +6538,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -5900,6 +6549,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
@@ -5910,6 +6560,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
@@ -5920,6 +6571,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
@@ -5930,6 +6582,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
@@ -5940,6 +6593,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -5950,6 +6604,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -5960,6 +6615,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -5970,6 +6626,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
@@ -5980,6 +6637,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
@@ -5990,6 +6648,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -6000,6 +6659,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -6010,6 +6670,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -6020,6 +6681,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
@@ -6030,6 +6692,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -6040,6 +6703,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -6050,6 +6714,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -6060,6 +6725,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -6070,6 +6736,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
@@ -6080,6 +6747,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
@@ -6090,6 +6758,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
@@ -6100,6 +6769,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
@@ -6110,6 +6780,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
@@ -6120,6 +6791,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -6130,6 +6802,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -6140,6 +6813,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
@@ -6150,6 +6824,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -6160,6 +6835,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
@@ -6170,6 +6846,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
@@ -6180,6 +6857,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
@@ -6190,6 +6868,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
@@ -6200,6 +6879,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -6210,6 +6890,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -6220,6 +6901,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -6230,6 +6912,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
@@ -6240,6 +6923,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
@@ -6250,6 +6934,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
@@ -6260,6 +6945,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
@@ -6270,6 +6956,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -6280,6 +6967,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -6290,6 +6978,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -6300,6 +6989,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
@@ -6310,6 +7000,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
@@ -6320,6 +7011,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
@@ -6330,6 +7022,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
@@ -6340,6 +7033,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -6350,6 +7044,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -6360,6 +7055,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -6370,6 +7066,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
@@ -6380,6 +7077,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
@@ -6390,6 +7088,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
@@ -6400,6 +7099,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
@@ -6410,6 +7110,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -6420,6 +7121,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -6430,6 +7132,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -6440,6 +7143,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
@@ -6450,6 +7154,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
@@ -6460,6 +7165,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -6470,6 +7176,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -6480,6 +7187,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -6490,6 +7198,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
@@ -6500,6 +7209,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -6510,6 +7220,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -6520,6 +7231,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -6530,6 +7242,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -6540,6 +7253,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
@@ -6550,6 +7264,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
@@ -6560,6 +7275,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
@@ -6570,6 +7286,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.ANY_SNOOP",
@@ -6580,6 +7297,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
@@ -6590,6 +7308,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -6600,6 +7319,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -6610,6 +7330,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
@@ -6620,6 +7341,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -6630,6 +7352,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_MISS",
@@ -6640,6 +7363,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_NONE",
@@ -6650,6 +7374,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.ANY_SNOOP",
@@ -6660,6 +7385,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
@@ -6670,6 +7396,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -6680,6 +7407,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -6690,6 +7418,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -6700,6 +7429,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.SNOOP_MISS",
@@ -6710,6 +7440,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.SNOOP_NONE",
@@ -6720,6 +7451,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.ANY_SNOOP",
@@ -6730,6 +7462,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
@@ -6740,6 +7473,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -6750,6 +7484,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -6760,6 +7495,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -6770,6 +7506,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.SNOOP_MISS",
@@ -6780,6 +7517,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.SNOOP_NONE",
@@ -6790,6 +7528,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.ANY_SNOOP",
@@ -6800,6 +7539,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
@@ -6810,6 +7550,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -6820,6 +7561,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -6830,6 +7572,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -6840,6 +7583,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.SNOOP_MISS",
@@ -6850,6 +7594,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.SNOOP_NONE",
@@ -6860,6 +7605,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.ANY_SNOOP",
@@ -6870,6 +7616,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
@@ -6880,6 +7627,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -6890,6 +7638,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -6900,6 +7649,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -6910,6 +7660,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.SNOOP_MISS",
@@ -6920,6 +7671,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.SNOOP_NONE",
@@ -6930,6 +7682,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -6940,6 +7693,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -6950,6 +7704,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -6960,6 +7715,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
@@ -6970,6 +7726,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -6980,6 +7737,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -6990,6 +7748,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -7000,6 +7759,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -7010,6 +7770,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
@@ -7020,6 +7781,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
@@ -7030,6 +7792,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
@@ -7040,6 +7803,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
@@ -7050,6 +7814,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
@@ -7060,6 +7825,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -7070,6 +7836,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -7080,6 +7847,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
@@ -7090,6 +7858,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -7100,6 +7869,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
@@ -7110,6 +7880,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
@@ -7120,6 +7891,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.ANY_SNOOP",
@@ -7130,6 +7902,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
@@ -7140,6 +7913,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -7150,6 +7924,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -7160,6 +7935,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -7170,6 +7946,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.SNOOP_MISS",
@@ -7180,6 +7957,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.SNOOP_NONE",
@@ -7190,6 +7968,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.ANY_SNOOP",
@@ -7200,6 +7979,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
@@ -7210,6 +7990,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -7220,6 +8001,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -7230,6 +8012,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -7240,6 +8023,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.SNOOP_MISS",
@@ -7250,6 +8034,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.SNOOP_NONE",
@@ -7260,6 +8045,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.ANY_SNOOP",
@@ -7270,6 +8056,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
@@ -7280,6 +8067,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -7290,6 +8078,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -7300,6 +8089,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -7310,6 +8100,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.SNOOP_MISS",
@@ -7320,6 +8111,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.SNOOP_NONE",
@@ -7330,6 +8122,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.ANY_SNOOP",
@@ -7340,6 +8133,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
@@ -7350,6 +8144,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -7360,6 +8155,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -7370,6 +8166,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -7380,6 +8177,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.SNOOP_MISS",
@@ -7390,6 +8188,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.SNOOP_NONE",
@@ -7400,6 +8199,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -7410,6 +8210,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -7420,6 +8221,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -7430,6 +8232,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
@@ -7440,6 +8243,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -7450,6 +8254,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -7460,6 +8265,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -7470,6 +8276,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -7480,6 +8287,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
@@ -7490,6 +8298,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
@@ -7500,6 +8309,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
@@ -7510,6 +8320,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
@@ -7520,6 +8331,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
@@ -7530,6 +8342,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -7540,6 +8353,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -7550,6 +8364,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
@@ -7560,6 +8375,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -7570,6 +8386,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
@@ -7580,6 +8397,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
@@ -7590,6 +8408,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
@@ -7600,6 +8419,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
@@ -7610,6 +8430,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -7620,6 +8441,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -7630,6 +8452,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -7640,6 +8463,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
@@ -7650,6 +8474,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
@@ -7660,6 +8485,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
@@ -7670,6 +8496,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
@@ -7680,6 +8507,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -7690,6 +8518,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -7700,6 +8529,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -7710,6 +8540,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
@@ -7720,6 +8551,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
@@ -7730,6 +8562,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
@@ -7740,6 +8573,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
@@ -7750,6 +8584,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -7760,6 +8595,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -7770,6 +8606,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -7780,6 +8617,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
@@ -7790,6 +8628,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
@@ -7800,6 +8639,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
@@ -7810,6 +8650,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
@@ -7820,6 +8661,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -7830,6 +8672,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -7840,6 +8683,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -7850,6 +8694,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
@@ -7860,6 +8705,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
@@ -7870,6 +8716,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -7880,6 +8727,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -7890,6 +8738,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -7900,6 +8749,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
@@ -7910,6 +8760,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -7920,6 +8771,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -7930,6 +8782,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -7940,6 +8793,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -7950,6 +8804,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
@@ -7960,6 +8815,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
@@ -7970,6 +8826,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
@@ -7980,6 +8837,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
@@ -7990,6 +8848,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
@@ -8000,6 +8859,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -8010,6 +8870,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -8020,6 +8881,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
@@ -8030,6 +8892,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -8040,6 +8903,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
@@ -8050,6 +8914,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
@@ -8060,6 +8925,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
@@ -8070,6 +8936,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
@@ -8080,6 +8947,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -8090,6 +8958,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -8100,6 +8969,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -8110,6 +8980,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
@@ -8120,6 +8991,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
@@ -8130,6 +9002,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
@@ -8140,6 +9013,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
@@ -8150,6 +9024,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -8160,6 +9035,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -8170,6 +9046,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -8180,6 +9057,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
@@ -8190,6 +9068,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
@@ -8200,6 +9079,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
@@ -8210,6 +9090,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
@@ -8220,6 +9101,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -8230,6 +9112,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -8240,6 +9123,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -8250,6 +9134,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
@@ -8260,6 +9145,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
@@ -8270,6 +9156,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
@@ -8280,6 +9167,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
@@ -8290,6 +9178,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -8300,6 +9189,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -8310,6 +9200,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -8320,6 +9211,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
@@ -8330,6 +9222,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
@@ -8340,6 +9233,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -8350,6 +9244,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -8360,6 +9255,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -8370,6 +9266,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
@@ -8380,6 +9277,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -8390,6 +9288,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -8400,6 +9299,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -8410,6 +9310,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -8420,6 +9321,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
@@ -8430,6 +9332,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
@@ -8440,6 +9343,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
@@ -8450,6 +9354,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
@@ -8460,6 +9365,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
@@ -8470,6 +9376,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -8480,6 +9387,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -8490,6 +9398,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
@@ -8500,6 +9409,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -8510,6 +9420,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
@@ -8520,6 +9431,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
@@ -8530,6 +9442,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
@@ -8540,6 +9453,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
@@ -8550,6 +9464,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -8560,6 +9475,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -8570,6 +9486,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -8580,6 +9497,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
@@ -8590,6 +9508,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
@@ -8600,6 +9519,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
@@ -8610,6 +9530,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
@@ -8620,6 +9541,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -8630,6 +9552,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -8640,6 +9563,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -8650,6 +9574,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
@@ -8660,6 +9585,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
@@ -8670,6 +9596,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
@@ -8680,6 +9607,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
@@ -8690,6 +9618,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -8700,6 +9629,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -8710,6 +9640,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -8720,6 +9651,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
@@ -8730,6 +9662,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
@@ -8740,6 +9673,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
@@ -8750,6 +9684,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
@@ -8760,6 +9695,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -8770,6 +9706,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -8780,6 +9717,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -8790,6 +9728,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
@@ -8800,6 +9739,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
@@ -8810,6 +9750,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -8820,6 +9761,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -8830,6 +9772,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -8840,6 +9783,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
@@ -8850,6 +9794,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -8860,6 +9805,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -8870,6 +9816,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -8880,6 +9827,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -8890,6 +9838,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
@@ -8900,6 +9849,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
@@ -8910,6 +9860,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
@@ -8920,6 +9871,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
@@ -8930,6 +9882,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
@@ -8940,6 +9893,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -8950,6 +9904,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -8960,6 +9915,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
@@ -8970,6 +9926,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -8980,6 +9937,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
@@ -8990,6 +9948,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
@@ -9000,6 +9959,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
@@ -9010,6 +9970,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HITM_OTHER_CORE",
@@ -9020,6 +9981,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -9030,6 +9992,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -9040,6 +10003,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -9050,6 +10014,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
@@ -9060,6 +10025,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
@@ -9070,6 +10036,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.ANY_SNOOP",
@@ -9080,6 +10047,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HITM_OTHER_CORE",
@@ -9090,6 +10058,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -9100,6 +10069,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -9110,6 +10080,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -9120,6 +10091,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.SNOOP_MISS",
@@ -9130,6 +10102,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.SNOOP_NONE",
@@ -9140,6 +10113,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
@@ -9150,6 +10124,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HITM_OTHER_CORE",
@@ -9160,6 +10135,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -9170,6 +10146,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -9180,6 +10157,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -9190,6 +10168,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
@@ -9200,6 +10179,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
@@ -9210,6 +10190,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
@@ -9220,6 +10201,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HITM_OTHER_CORE",
@@ -9230,6 +10212,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -9240,6 +10223,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -9250,6 +10234,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -9260,6 +10245,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
@@ -9270,6 +10256,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
@@ -9280,6 +10267,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -9290,6 +10278,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -9300,6 +10289,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -9310,6 +10300,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
@@ -9320,6 +10311,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -9330,6 +10322,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -9340,6 +10333,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -9350,6 +10344,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -9360,6 +10355,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
@@ -9370,6 +10366,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
@@ -9380,6 +10377,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
@@ -9390,6 +10388,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
@@ -9400,6 +10399,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
@@ -9410,6 +10410,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -9420,6 +10421,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -9430,6 +10432,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
@@ -9440,6 +10443,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -9450,6 +10454,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
@@ -9460,6 +10465,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
@@ -9470,6 +10476,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
@@ -9480,6 +10487,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
@@ -9490,6 +10498,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -9500,6 +10509,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -9510,6 +10520,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -9520,6 +10531,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
@@ -9530,6 +10542,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
@@ -9540,6 +10553,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
@@ -9550,6 +10564,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
@@ -9560,6 +10575,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -9570,6 +10586,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -9580,6 +10597,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -9590,6 +10608,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
@@ -9600,6 +10619,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
@@ -9610,6 +10630,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
@@ -9620,6 +10641,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
@@ -9630,6 +10652,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -9640,6 +10663,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -9650,6 +10674,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -9660,6 +10685,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
@@ -9670,6 +10696,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
@@ -9680,6 +10707,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
@@ -9690,6 +10718,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
@@ -9700,6 +10729,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -9710,6 +10740,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -9720,6 +10751,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -9730,6 +10762,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
@@ -9740,6 +10773,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
@@ -9750,6 +10784,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -9760,6 +10795,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -9770,6 +10806,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -9780,6 +10817,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
@@ -9790,6 +10828,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -9800,6 +10839,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -9810,6 +10850,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -9820,6 +10861,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -9830,6 +10872,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
@@ -9840,6 +10883,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
@@ -9850,6 +10894,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
@@ -9860,6 +10905,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
@@ -9870,6 +10916,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
@@ -9880,6 +10927,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -9890,6 +10938,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -9900,6 +10949,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
@@ -9910,6 +10960,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -9920,6 +10971,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
@@ -9930,6 +10982,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
@@ -9940,6 +10993,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
@@ -9950,6 +11004,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
@@ -9960,6 +11015,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -9970,6 +11026,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -9980,6 +11037,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -9990,6 +11048,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
@@ -10000,6 +11059,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
@@ -10010,6 +11070,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
@@ -10020,6 +11081,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
@@ -10030,6 +11092,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -10040,6 +11103,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -10050,6 +11114,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -10060,6 +11125,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
@@ -10070,6 +11136,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
@@ -10080,6 +11147,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
@@ -10090,6 +11158,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
@@ -10100,6 +11169,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -10110,6 +11180,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -10120,6 +11191,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -10130,6 +11202,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
@@ -10140,6 +11213,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
@@ -10150,6 +11224,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
@@ -10160,6 +11235,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
@@ -10170,6 +11246,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -10180,6 +11257,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -10190,6 +11268,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -10200,6 +11279,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
@@ -10210,6 +11290,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
@@ -10220,6 +11301,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -10230,6 +11312,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -10240,6 +11323,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -10250,6 +11334,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
@@ -10260,6 +11345,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -10270,6 +11356,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -10280,6 +11367,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -10290,6 +11378,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -10300,6 +11389,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
@@ -10310,6 +11400,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
@@ -10320,6 +11411,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
@@ -10330,6 +11422,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
@@ -10340,6 +11433,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
@@ -10350,6 +11444,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -10360,6 +11455,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -10370,6 +11466,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
@@ -10380,6 +11477,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -10390,6 +11488,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
@@ -10400,6 +11499,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
@@ -10410,6 +11510,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
@@ -10420,6 +11521,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
@@ -10430,6 +11532,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -10440,6 +11543,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -10450,6 +11554,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -10460,6 +11565,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
@@ -10470,6 +11576,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
@@ -10480,6 +11587,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
@@ -10490,6 +11598,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
@@ -10500,6 +11609,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -10510,6 +11620,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -10520,6 +11631,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -10530,6 +11642,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
@@ -10540,6 +11653,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
@@ -10550,6 +11664,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
@@ -10560,6 +11675,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
@@ -10570,6 +11686,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -10580,6 +11697,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -10590,6 +11708,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -10600,6 +11719,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
@@ -10610,6 +11730,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
@@ -10620,6 +11741,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
@@ -10630,6 +11752,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
@@ -10640,6 +11763,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -10650,6 +11774,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -10660,6 +11785,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -10670,6 +11796,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
@@ -10680,6 +11807,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
@@ -10690,6 +11818,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -10700,6 +11829,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -10710,6 +11840,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -10720,6 +11851,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
@@ -10730,6 +11862,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -10740,6 +11873,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -10750,6 +11884,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -10760,6 +11895,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -10770,6 +11906,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
@@ -10780,6 +11917,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
@@ -10790,6 +11928,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
@@ -10800,6 +11939,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
@@ -10810,6 +11950,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
@@ -10820,6 +11961,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -10830,6 +11972,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -10840,6 +11983,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
@@ -10850,6 +11994,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -10860,6 +12005,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
@@ -10870,6 +12016,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
@@ -10880,6 +12027,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
@@ -10890,6 +12038,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
@@ -10900,6 +12049,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -10910,6 +12060,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -10920,6 +12071,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -10930,6 +12082,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
@@ -10940,6 +12093,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
@@ -10950,6 +12104,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
@@ -10960,6 +12115,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
@@ -10970,6 +12126,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -10980,6 +12137,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -10990,6 +12148,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -11000,6 +12159,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
@@ -11010,6 +12170,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
@@ -11020,6 +12181,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
@@ -11030,6 +12192,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
@@ -11040,6 +12203,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -11050,6 +12214,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -11060,6 +12225,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -11070,6 +12236,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
@@ -11080,6 +12247,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
@@ -11090,6 +12258,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
@@ -11100,6 +12269,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
@@ -11110,6 +12280,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -11120,6 +12291,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -11130,6 +12302,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -11140,6 +12313,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
@@ -11150,6 +12324,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
@@ -11160,6 +12335,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -11170,6 +12346,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -11180,6 +12357,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -11190,6 +12368,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
@@ -11200,6 +12379,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -11210,6 +12390,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -11220,6 +12401,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -11230,6 +12412,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -11240,6 +12423,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
@@ -11250,6 +12434,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
@@ -11260,6 +12445,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
@@ -11270,6 +12456,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
@@ -11280,6 +12467,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
@@ -11290,6 +12478,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
@@ -11300,6 +12489,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
@@ -11310,6 +12500,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
@@ -11320,6 +12511,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -11330,6 +12522,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
@@ -11340,6 +12533,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
@@ -11350,6 +12544,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
@@ -11360,6 +12555,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
@@ -11370,6 +12566,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
@@ -11380,6 +12577,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
@@ -11390,6 +12588,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
@@ -11400,6 +12599,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
@@ -11410,6 +12610,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
@@ -11420,6 +12621,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
@@ -11430,6 +12632,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
@@ -11440,6 +12643,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
@@ -11450,6 +12654,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
@@ -11460,6 +12665,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
@@ -11470,6 +12676,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
@@ -11480,6 +12687,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
@@ -11490,6 +12698,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
@@ -11500,6 +12709,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
@@ -11510,6 +12720,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
@@ -11520,6 +12731,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
@@ -11530,6 +12742,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
@@ -11540,6 +12753,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
@@ -11550,6 +12764,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
@@ -11560,6 +12775,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
@@ -11570,6 +12786,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
@@ -11580,6 +12797,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
@@ -11590,6 +12808,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
@@ -11600,6 +12819,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
@@ -11610,6 +12830,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
@@ -11620,6 +12841,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
@@ -11630,6 +12852,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
@@ -11640,6 +12863,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
@@ -11650,6 +12874,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
@@ -11660,6 +12885,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
@@ -11670,6 +12896,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
@@ -11680,6 +12907,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
@@ -11690,6 +12918,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
@@ -11700,6 +12929,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
@@ -11710,6 +12940,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
@@ -11720,6 +12951,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
@@ -11730,6 +12962,7 @@
},
{
"BriefDescription": "Number of cache line split locks sent to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
@@ -11737,7 +12970,16 @@
"UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"SampleAfterValue": "2000003",
@@ -11745,6 +12987,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"SampleAfterValue": "2000003",
@@ -11752,6 +12995,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T0",
"SampleAfterValue": "2000003",
@@ -11759,6 +13003,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 297046818efe..b02a89e14c5d 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -68,7 +68,7 @@
},
{
"BriefDescription": "Percentage of time spent in the active CPU power state C0",
- "MetricExpr": "tma_info_system_cpu_utilization",
+ "MetricExpr": "tma_info_system_cpus_utilized",
"MetricName": "cpu_utilization",
"ScaleUnit": "100%"
},
@@ -163,7 +163,7 @@
},
{
"BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
- "MetricExpr": "cha@UNC_CHA_TOR_INSERTS.IA_MISS\\,config1\\=0x12CC0233@ / INST_RETIRED.ANY",
+ "MetricExpr": "cha@UNC_CHA_TOR_INSERTS.IA_MISS\\,config1\\=0x12cc0233@ / INST_RETIRED.ANY",
"MetricName": "llc_code_read_mpi_demand_plus_prefetch",
"ScaleUnit": "1per_instr"
},
@@ -187,7 +187,7 @@
},
{
"BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
- "MetricExpr": "cha@UNC_CHA_TOR_INSERTS.IA_MISS\\,config1\\=0x12D40433@ / INST_RETIRED.ANY",
+ "MetricExpr": "cha@UNC_CHA_TOR_INSERTS.IA_MISS\\,config1\\=0x12d40433@ / INST_RETIRED.ANY",
"MetricName": "llc_data_read_mpi_demand_plus_prefetch",
"ScaleUnit": "1per_instr"
},
@@ -328,7 +328,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -337,7 +337,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - tma_frontend_bound - (UOPS_ISSUED.ANY + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -358,7 +358,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -396,7 +396,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) + 44 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -417,7 +417,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -435,7 +435,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -466,14 +466,14 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -482,7 +482,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -492,7 +492,7 @@
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(110 * tma_info_system_core_frequency * (OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM + OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_system_core_frequency * (OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -502,7 +502,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -515,7 +515,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -530,6 +530,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
"MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
"MetricName": "tma_few_uops_instructions",
@@ -558,7 +559,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -605,7 +606,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -615,7 +616,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
@@ -634,7 +635,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -669,24 +670,6 @@
},
{
"BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
- "MetricExpr": "(100 * (1 - tma_core_bound / (((EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if tma_core_bound < (((EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
- "MetricGroup": "Cor;SMT",
- "MetricName": "tma_info_botlnk_core_bound_likely"
- },
- {
- "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "100 * (100 * (tma_fetch_latency * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(2 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb)))",
- "MetricGroup": "DSBmiss;Fed",
- "MetricName": "tma_info_botlnk_dsb_misses"
- },
- {
- "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.",
- "MetricExpr": "100 * (100 * (tma_fetch_latency * ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD) / ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(2 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD)))",
- "MetricGroup": "Fed;FetchLat;IcMiss",
- "MetricName": "tma_info_botlnk_ic_misses"
- },
- {
- "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
"MetricGroup": "Cor;SMT",
@@ -694,13 +677,21 @@
"MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
@@ -711,39 +702,33 @@
"PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
- "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5"
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
@@ -751,23 +736,23 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
@@ -775,8 +760,8 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
@@ -784,7 +769,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
@@ -793,18 +778,25 @@
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -858,7 +850,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -875,7 +867,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 4 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
@@ -936,7 +928,7 @@
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -1032,18 +1024,12 @@
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
- },
- {
- "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki",
- "MetricGroup": "Fed;MemoryTLB",
- "MetricName": "tma_info_memory_code_stlb_mpki"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -1082,30 +1068,18 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "Average Parallel L2 cache miss data reads",
- "MetricExpr": "tma_info_memory_latency_data_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_data_l2_mlp"
- },
- {
"BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_fb_hpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -1118,30 +1092,12 @@
"MetricName": "tma_info_memory_l1mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l2_cache_fill_bw_2t"
- },
- {
- "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
- "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
- "MetricGroup": "L2Evicts;Mem;Server",
- "MetricName": "tma_info_memory_l2_evictions_nonsilent_pki"
- },
- {
- "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
- "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
- "MetricGroup": "L2Evicts;Mem;Server",
- "MetricName": "tma_info_memory_l2_evictions_silent_pki"
- },
- {
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -1172,30 +1128,24 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "tma_info_memory_l3_cache_access_bw"
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
},
{
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / (duration_time * 1e3 / 1e3)",
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "tma_info_memory_l3_cache_access_bw_2t"
+ "MetricName": "tma_info_memory_l3_cache_access_bw"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l3_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -1209,27 +1159,15 @@
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_miss_latency",
- "MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
- },
- {
- "BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_latency_load_l2_mlp"
- },
- {
- "BriefDescription": "Average Latency for L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_load_l2_miss_latency"
+ "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_load_l2_mlp"
+ "MetricName": "tma_info_memory_latency_load_l2_mlp"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
@@ -1238,14 +1176,8 @@
"MetricName": "tma_info_memory_load_miss_real_latency"
},
{
- "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_load_stlb_mpki",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_load_stlb_mpki"
- },
- {
"BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "tma_info_memory_uc_load_pki",
+ "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_mix_uc_load_pki"
},
@@ -1257,18 +1189,6 @@
"PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "tma_info_memory_tlb_page_walks_utilization",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_page_walks_utilization"
- },
- {
- "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_store_stlb_mpki"
- },
- {
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
"MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
"MetricGroup": "Fed;MemoryTLB",
@@ -1295,18 +1215,24 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
- "MetricGroup": "Mem",
- "MetricName": "tma_info_memory_uc_load_pki"
- },
- {
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
@@ -1328,13 +1254,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1513,7 +1439,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1522,7 +1448,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -1538,10 +1464,19 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -1559,7 +1494,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricExpr": "17 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -1571,7 +1506,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
@@ -1616,7 +1551,7 @@
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
"MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
@@ -1625,14 +1560,14 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -1642,7 +1577,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1651,7 +1586,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -1678,6 +1613,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
@@ -1688,7 +1624,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -1724,7 +1660,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
"MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
@@ -1733,7 +1669,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -1751,7 +1687,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1759,7 +1695,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1857,7 +1793,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
+ "MetricExpr": "EXE_ACTIVITY.EXE_BOUND_0_PORTS / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -1885,7 +1821,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -1912,7 +1848,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -1922,7 +1858,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -1959,7 +1895,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1987,7 +1923,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 11 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -2020,7 +1956,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/counter.json b/tools/perf/pmu-events/arch/x86/cascadelakex/counter.json
new file mode 100644
index 000000000000..e94b76404856
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/counter.json
@@ -0,0 +1,52 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2M",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M3UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "3"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
index bb4d5101f962..1c709983b65f 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instruction retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Counts once for most SIMD scalar computational single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "2000003",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Intel AVX-512 computational 512-bit packed BFloat16 instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCF",
"EventName": "FP_ARITH_INST_RETIRED2.128BIT_PACKED_BF16",
"PublicDescription": "Counts once for each Intel AVX-512 computational 512-bit packed BFloat16 floating-point instruction retired. Applies to the ZMM based VDPBF16PS instruction. Each count represents 64 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Intel AVX-512 computational 128-bit packed BFloat16 instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCF",
"EventName": "FP_ARITH_INST_RETIRED2.256BIT_PACKED_BF16",
"PublicDescription": "Counts once for each Intel AVX-512 computational 128-bit packed BFloat16 floating-point instruction retired. Applies to the XMM based VDPBF16PS instruction. Each count represents 16 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Intel AVX-512 computational 256-bit packed BFloat16 instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCF",
"EventName": "FP_ARITH_INST_RETIRED2.512BIT_PACKED_BF16",
"PublicDescription": "Counts once for each Intel AVX-512 computational 256-bit packed BFloat16 floating-point instruction retired. Applies to the YMM based VDPBF16PS instruction. Each count represents 32 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
index d6f543471b24..0e1dedce00f2 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
"PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses. Note: Invoking MITE requires two or three cycles delay.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -66,6 +73,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -76,6 +84,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -86,6 +95,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -97,6 +107,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -107,6 +118,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -118,6 +130,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -128,6 +141,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -138,6 +152,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -149,6 +164,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
@@ -159,6 +175,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
@@ -169,6 +186,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -180,6 +198,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -190,6 +209,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -200,6 +220,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -210,6 +231,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -221,6 +243,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -232,6 +255,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
@@ -240,6 +264,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_HIT",
"SampleAfterValue": "200003",
@@ -247,6 +272,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_MISS",
"SampleAfterValue": "200003",
@@ -254,6 +280,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"SampleAfterValue": "200003",
@@ -261,6 +288,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"SampleAfterValue": "200003",
@@ -268,6 +296,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.DSB_CYCLES_OK]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -277,6 +306,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.DSB_CYCLES_ANY]",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -286,6 +316,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -295,6 +326,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -304,6 +336,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -313,6 +346,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -322,6 +356,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
@@ -331,6 +366,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
@@ -339,6 +375,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -348,6 +385,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -356,6 +394,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -365,6 +404,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -374,6 +414,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
@@ -382,6 +423,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -392,6 +434,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
@@ -400,6 +443,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
@@ -408,6 +452,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -417,6 +462,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -426,6 +472,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -435,6 +482,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -444,6 +492,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
index c69b2c33334b..bab4ca603f08 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED",
"PEBS": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_EVENTS",
"SampleAfterValue": "2000003",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_MEM",
"SampleAfterValue": "2000003",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_TIMER",
"SampleAfterValue": "2000003",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"SampleAfterValue": "2000003",
@@ -62,6 +70,7 @@
},
{
"BriefDescription": "Number of times an HLE execution successfully committed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.COMMIT",
"PublicDescription": "Number of times HLE commit succeeded.",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.START",
"PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
@@ -78,6 +88,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"Errata": "SKL089",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
@@ -87,6 +98,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -111,6 +124,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -123,6 +137,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -135,6 +150,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -147,6 +163,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -159,6 +176,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -171,6 +189,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -183,6 +202,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -192,6 +212,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -201,6 +222,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -210,6 +232,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -219,6 +242,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -228,6 +252,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -237,6 +262,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -246,6 +272,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -255,6 +282,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -264,6 +292,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -273,6 +302,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -282,6 +312,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -291,6 +322,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -300,6 +332,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -309,6 +342,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -318,6 +352,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -327,6 +362,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -336,6 +372,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -345,6 +382,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -354,6 +392,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -363,6 +402,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -372,6 +412,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -381,6 +422,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -390,6 +432,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -399,6 +442,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -408,6 +452,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -417,6 +462,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -426,6 +472,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -435,6 +482,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -444,6 +492,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -453,6 +502,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -462,6 +512,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -471,6 +522,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -480,6 +532,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -489,6 +542,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -498,6 +552,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -507,6 +562,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -516,6 +572,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -525,6 +582,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -534,6 +592,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -543,6 +602,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -552,6 +612,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -561,6 +622,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -570,6 +632,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -579,6 +642,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -588,6 +652,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -597,6 +662,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -606,6 +672,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -615,6 +682,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -624,6 +692,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -633,6 +702,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -642,6 +712,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -651,6 +722,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -660,6 +732,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -669,6 +742,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -678,6 +752,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -687,6 +762,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -696,6 +772,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -705,6 +782,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -714,6 +792,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -723,6 +802,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -732,6 +812,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -741,6 +822,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -750,6 +832,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -759,6 +842,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -768,6 +852,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -777,6 +862,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -786,6 +872,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -795,6 +882,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -804,6 +892,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -813,6 +902,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -822,6 +912,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -831,6 +922,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -840,6 +932,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -849,6 +942,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -858,6 +952,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.ANY_SNOOP OCR.ALL_READS.L3_MISS.ANY_SNOOP OCR.ALL_READS.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -867,6 +962,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -876,6 +972,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -885,6 +982,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -894,6 +992,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -903,6 +1002,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.REMOTE_HITM OCR.ALL_READS.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -912,6 +1012,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -921,6 +1022,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.SNOOP_MISS OCR.ALL_READS.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -930,6 +1032,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS.SNOOP_NONE OCR.ALL_READS.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -939,6 +1042,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -948,6 +1052,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -957,6 +1062,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -966,6 +1072,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -975,6 +1082,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -984,6 +1092,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -993,6 +1102,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1002,6 +1112,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1011,6 +1122,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1020,6 +1132,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1029,6 +1142,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1038,6 +1152,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1047,6 +1162,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1056,6 +1172,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1065,6 +1182,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1074,6 +1192,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1083,6 +1202,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.ANY_SNOOP OCR.ALL_RFO.L3_MISS.ANY_SNOOP OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1092,6 +1212,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1101,6 +1222,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1110,6 +1232,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1119,6 +1242,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1128,6 +1252,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.REMOTE_HITM OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1137,6 +1262,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1146,6 +1272,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.SNOOP_MISS OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1155,6 +1282,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS.SNOOP_NONE OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1164,6 +1292,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1173,6 +1302,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1182,6 +1312,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1191,6 +1322,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1200,6 +1332,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1209,6 +1342,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1218,6 +1352,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1227,6 +1362,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1236,6 +1372,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1245,6 +1382,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1254,6 +1392,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1263,6 +1402,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1272,6 +1412,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1281,6 +1422,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1290,6 +1432,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1299,6 +1442,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1308,6 +1452,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1317,6 +1462,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1326,6 +1472,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1335,6 +1482,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1344,6 +1492,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1353,6 +1502,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1362,6 +1512,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1371,6 +1522,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1380,6 +1532,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1389,6 +1542,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1398,6 +1552,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1407,6 +1562,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1416,6 +1572,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1425,6 +1582,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1434,6 +1592,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1443,6 +1602,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1452,6 +1612,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1461,6 +1622,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1470,6 +1632,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1479,6 +1642,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1488,6 +1652,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1497,6 +1662,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1506,6 +1672,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1515,6 +1682,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1524,6 +1692,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1533,6 +1702,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1542,6 +1712,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1551,6 +1722,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1560,6 +1732,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1569,6 +1742,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1578,6 +1752,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1587,6 +1762,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1596,6 +1772,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1605,6 +1782,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1614,6 +1792,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1623,6 +1802,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1632,6 +1812,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1641,6 +1822,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1650,6 +1832,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1659,6 +1842,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1668,6 +1852,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1677,6 +1862,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1686,6 +1872,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1695,6 +1882,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1704,6 +1892,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1713,6 +1902,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1722,6 +1912,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1731,6 +1922,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1740,6 +1932,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1749,6 +1942,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1758,6 +1952,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1767,6 +1962,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1776,6 +1972,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1785,6 +1982,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1794,6 +1992,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1803,6 +2002,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1812,6 +2012,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1821,6 +2022,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1830,6 +2032,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1839,6 +2042,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1848,6 +2052,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1857,6 +2062,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1866,6 +2072,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1875,6 +2082,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1884,6 +2092,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1893,6 +2102,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1902,6 +2112,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1911,6 +2122,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1920,6 +2132,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1929,6 +2142,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1938,6 +2152,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1947,6 +2162,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1956,6 +2172,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1965,6 +2182,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1974,6 +2192,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1983,6 +2202,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.ANY_SNOOP OCR.OTHER.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1992,6 +2212,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.HITM_OTHER_CORE OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2001,6 +2222,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2010,6 +2232,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2019,6 +2242,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2028,6 +2252,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2037,6 +2262,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2046,6 +2272,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2055,6 +2282,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2064,6 +2292,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2073,6 +2302,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2082,6 +2312,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2091,6 +2322,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2100,6 +2332,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2109,6 +2342,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2118,6 +2352,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2127,6 +2362,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2136,6 +2372,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2145,6 +2382,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2154,6 +2392,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2163,6 +2402,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2172,6 +2412,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2181,6 +2422,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2190,6 +2432,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2199,6 +2442,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2208,6 +2452,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2217,6 +2462,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2226,6 +2472,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2235,6 +2482,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2244,6 +2492,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2253,6 +2502,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2262,6 +2512,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2271,6 +2522,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2280,6 +2532,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2289,6 +2542,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2298,6 +2552,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2307,6 +2562,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2316,6 +2572,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2325,6 +2582,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2334,6 +2592,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2343,6 +2602,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2352,6 +2612,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2361,6 +2622,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2370,6 +2632,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2379,6 +2642,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2388,6 +2652,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2397,6 +2662,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2406,6 +2672,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2415,6 +2682,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2424,6 +2692,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2433,6 +2702,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2442,6 +2712,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2451,6 +2722,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2460,6 +2732,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2469,6 +2742,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2478,6 +2752,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2487,6 +2762,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2496,6 +2772,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2505,6 +2782,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2514,6 +2792,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2523,6 +2802,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2532,6 +2812,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2541,6 +2822,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2550,6 +2832,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2559,6 +2842,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2568,6 +2852,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2577,6 +2862,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2586,6 +2872,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2595,6 +2882,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2604,6 +2892,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2613,6 +2902,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2622,6 +2912,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2631,6 +2922,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2640,6 +2932,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2649,6 +2942,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2658,6 +2952,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2667,6 +2962,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2676,6 +2972,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2685,6 +2982,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2694,6 +2992,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2703,6 +3002,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2712,6 +3012,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2721,6 +3022,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2730,6 +3032,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2739,6 +3042,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2748,6 +3052,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2757,6 +3062,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2766,6 +3072,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2775,6 +3082,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2784,6 +3092,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2793,6 +3102,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2802,6 +3112,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2811,6 +3122,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2820,6 +3132,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2829,6 +3142,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2838,6 +3152,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2847,6 +3162,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2856,6 +3172,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2865,6 +3182,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2874,6 +3192,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2883,6 +3202,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2892,6 +3212,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2901,6 +3222,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2910,6 +3232,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2919,6 +3242,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2928,6 +3252,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2937,6 +3262,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2946,6 +3272,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2955,6 +3282,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2964,6 +3292,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2973,6 +3302,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2982,6 +3312,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2991,6 +3322,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3000,6 +3332,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3009,6 +3342,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3018,6 +3352,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3027,6 +3362,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3036,6 +3372,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3045,6 +3382,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3054,6 +3392,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3063,6 +3402,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3072,6 +3412,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3081,6 +3422,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3090,6 +3432,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3099,6 +3442,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3108,6 +3452,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3117,6 +3462,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3126,6 +3472,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3135,6 +3482,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3144,6 +3492,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3153,6 +3502,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -3162,6 +3512,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3171,6 +3522,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3180,6 +3532,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3189,6 +3542,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3198,6 +3552,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3207,6 +3562,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3216,6 +3572,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3225,6 +3582,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3234,6 +3592,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3243,6 +3602,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3252,6 +3612,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3261,6 +3622,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3270,6 +3632,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -3279,6 +3642,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3288,6 +3652,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3297,6 +3662,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -3306,6 +3672,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -3315,6 +3682,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -3324,6 +3692,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -3333,6 +3702,7 @@
},
{
"BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"PublicDescription": "Demand Data Read requests who miss L3 cache.",
@@ -3341,6 +3711,7 @@
},
{
"BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
@@ -3349,6 +3720,7 @@
},
{
"BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
@@ -3356,6 +3728,7 @@
},
{
"BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
@@ -3364,6 +3737,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
@@ -3374,6 +3748,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
@@ -3384,6 +3759,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -3394,6 +3770,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -3404,6 +3781,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
@@ -3414,6 +3792,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
@@ -3424,6 +3803,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
@@ -3434,6 +3814,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
@@ -3444,6 +3825,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
@@ -3454,6 +3836,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -3464,6 +3847,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -3474,6 +3858,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -3484,6 +3869,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -3494,6 +3880,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -3504,6 +3891,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -3514,6 +3902,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -3524,6 +3913,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -3534,6 +3924,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -3544,6 +3935,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -3554,6 +3946,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -3564,6 +3957,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -3574,6 +3968,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -3584,6 +3979,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -3594,6 +3990,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -3604,6 +4001,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -3614,6 +4012,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
@@ -3624,6 +4023,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
@@ -3634,6 +4034,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -3644,6 +4045,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -3654,6 +4056,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
@@ -3664,6 +4067,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
@@ -3674,6 +4078,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
@@ -3684,6 +4089,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
@@ -3694,6 +4100,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
@@ -3704,6 +4111,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -3714,6 +4122,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -3724,6 +4133,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -3734,6 +4144,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -3744,6 +4155,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -3754,6 +4166,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -3764,6 +4177,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -3774,6 +4188,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -3784,6 +4199,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -3794,6 +4210,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -3804,6 +4221,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -3814,6 +4232,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -3824,6 +4243,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -3834,6 +4254,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -3844,6 +4265,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -3854,6 +4276,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -3864,6 +4287,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
@@ -3874,6 +4298,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
@@ -3884,6 +4309,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -3894,6 +4320,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -3904,6 +4331,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
@@ -3914,6 +4342,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
@@ -3924,6 +4353,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
@@ -3934,6 +4364,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
@@ -3944,6 +4375,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
@@ -3954,6 +4386,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -3964,6 +4397,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -3974,6 +4408,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -3984,6 +4419,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -3994,6 +4430,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -4004,6 +4441,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -4014,6 +4452,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -4024,6 +4463,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -4034,6 +4474,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -4044,6 +4485,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -4054,6 +4496,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -4064,6 +4507,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -4074,6 +4518,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -4084,6 +4529,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -4094,6 +4540,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -4104,6 +4551,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -4114,6 +4562,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_SNOOP",
@@ -4124,6 +4573,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HITM_OTHER_CORE",
@@ -4134,6 +4584,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -4144,6 +4595,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -4154,6 +4606,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
@@ -4164,6 +4617,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.REMOTE_HITM",
@@ -4174,6 +4628,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
@@ -4184,6 +4639,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.SNOOP_MISS",
@@ -4194,6 +4650,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.SNOOP_NONE",
@@ -4204,6 +4661,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -4214,6 +4672,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -4224,6 +4683,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -4234,6 +4694,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -4244,6 +4705,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -4254,6 +4716,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -4264,6 +4727,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -4274,6 +4738,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -4284,6 +4749,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -4294,6 +4760,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -4304,6 +4771,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -4314,6 +4782,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -4324,6 +4793,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -4334,6 +4804,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -4344,6 +4815,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -4354,6 +4826,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -4364,6 +4837,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
@@ -4374,6 +4848,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
@@ -4384,6 +4859,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -4394,6 +4870,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -4404,6 +4881,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
@@ -4414,6 +4892,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
@@ -4424,6 +4903,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
@@ -4434,6 +4914,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
@@ -4444,6 +4925,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
@@ -4454,6 +4936,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -4464,6 +4947,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -4474,6 +4958,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -4484,6 +4969,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -4494,6 +4980,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -4504,6 +4991,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -4514,6 +5002,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -4524,6 +5013,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -4534,6 +5024,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -4544,6 +5035,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -4554,6 +5046,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -4564,6 +5057,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -4574,6 +5068,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -4584,6 +5079,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -4594,6 +5090,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -4604,6 +5101,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -4614,6 +5112,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
@@ -4624,6 +5123,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
@@ -4634,6 +5134,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -4644,6 +5145,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -4654,6 +5156,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
@@ -4664,6 +5167,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
@@ -4674,6 +5178,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
@@ -4684,6 +5189,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
@@ -4694,6 +5200,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
@@ -4704,6 +5211,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -4714,6 +5222,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -4724,6 +5233,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -4734,6 +5244,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -4744,6 +5255,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -4754,6 +5266,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -4764,6 +5277,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -4774,6 +5288,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -4784,6 +5299,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -4794,6 +5310,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -4804,6 +5321,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -4814,6 +5332,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -4824,6 +5343,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -4834,6 +5354,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -4844,6 +5365,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -4854,6 +5376,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -4864,6 +5387,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
@@ -4874,6 +5398,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
@@ -4884,6 +5409,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -4894,6 +5420,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -4904,6 +5431,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
@@ -4914,6 +5442,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
@@ -4924,6 +5453,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
@@ -4934,6 +5464,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
@@ -4944,6 +5475,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
@@ -4954,6 +5486,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -4964,6 +5497,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -4974,6 +5508,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -4984,6 +5519,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -4994,6 +5530,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -5004,6 +5541,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -5014,6 +5552,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -5024,6 +5563,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -5034,6 +5574,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -5044,6 +5585,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -5054,6 +5596,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -5064,6 +5607,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -5074,6 +5618,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -5084,6 +5629,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -5094,6 +5640,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -5104,6 +5651,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -5114,6 +5662,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
@@ -5124,6 +5673,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
@@ -5134,6 +5684,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -5144,6 +5695,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -5154,6 +5706,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
@@ -5164,6 +5717,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
@@ -5174,6 +5728,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
@@ -5184,6 +5739,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
@@ -5194,6 +5750,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
@@ -5204,6 +5761,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -5214,6 +5772,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -5224,6 +5783,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -5234,6 +5794,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -5244,6 +5805,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -5254,6 +5816,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -5264,6 +5827,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -5274,6 +5838,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -5284,6 +5849,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -5294,6 +5860,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -5304,6 +5871,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -5314,6 +5882,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -5324,6 +5893,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -5334,6 +5904,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -5344,6 +5915,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -5354,6 +5926,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -5364,6 +5937,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
@@ -5374,6 +5948,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HITM_OTHER_CORE",
@@ -5384,6 +5959,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -5394,6 +5970,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -5404,6 +5981,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.NO_SNOOP_NEEDED",
@@ -5414,6 +5992,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
@@ -5424,6 +6003,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
@@ -5434,6 +6014,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
@@ -5444,6 +6025,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
@@ -5454,6 +6036,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -5464,6 +6047,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -5474,6 +6058,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -5484,6 +6069,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -5494,6 +6080,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -5504,6 +6091,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -5514,6 +6102,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -5524,6 +6113,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -5534,6 +6124,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -5544,6 +6135,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -5554,6 +6146,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -5564,6 +6157,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -5574,6 +6168,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -5584,6 +6179,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -5594,6 +6190,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -5604,6 +6201,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -5614,6 +6212,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
@@ -5624,6 +6223,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
@@ -5634,6 +6234,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -5644,6 +6245,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -5654,6 +6256,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
@@ -5664,6 +6267,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
@@ -5674,6 +6278,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
@@ -5684,6 +6289,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
@@ -5694,6 +6300,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
@@ -5704,6 +6311,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -5714,6 +6322,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -5724,6 +6333,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -5734,6 +6344,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -5744,6 +6355,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -5754,6 +6366,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -5764,6 +6377,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -5774,6 +6388,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -5784,6 +6399,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -5794,6 +6410,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -5804,6 +6421,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -5814,6 +6432,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -5824,6 +6443,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -5834,6 +6454,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -5844,6 +6465,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -5854,6 +6476,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -5864,6 +6487,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
@@ -5874,6 +6498,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
@@ -5884,6 +6509,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -5894,6 +6520,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -5904,6 +6531,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
@@ -5914,6 +6542,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
@@ -5924,6 +6553,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
@@ -5934,6 +6564,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
@@ -5944,6 +6575,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
@@ -5954,6 +6586,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -5964,6 +6597,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -5974,6 +6608,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -5984,6 +6619,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -5994,6 +6630,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -6004,6 +6641,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -6014,6 +6652,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -6024,6 +6663,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -6034,6 +6674,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -6044,6 +6685,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -6054,6 +6696,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -6064,6 +6707,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -6074,6 +6718,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -6084,6 +6729,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -6094,6 +6740,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -6104,6 +6751,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -6114,6 +6762,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
@@ -6124,6 +6773,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
@@ -6134,6 +6784,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -6144,6 +6795,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -6154,6 +6806,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
@@ -6164,6 +6817,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
@@ -6174,6 +6828,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
@@ -6184,6 +6839,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
@@ -6194,6 +6850,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
@@ -6204,6 +6861,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -6214,6 +6872,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -6224,6 +6883,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -6234,6 +6894,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -6244,6 +6905,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -6254,6 +6916,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -6264,6 +6927,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -6274,6 +6938,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -6284,6 +6949,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -6294,6 +6960,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -6304,6 +6971,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -6314,6 +6982,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -6324,6 +6993,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -6334,6 +7004,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -6344,6 +7015,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -6354,6 +7026,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -6364,6 +7037,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
@@ -6374,6 +7048,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
@@ -6384,6 +7059,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -6394,6 +7070,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -6404,6 +7081,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
@@ -6414,6 +7092,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
@@ -6424,6 +7103,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
@@ -6434,6 +7114,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
@@ -6444,6 +7125,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
@@ -6454,6 +7136,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -6464,6 +7147,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -6474,6 +7158,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -6484,6 +7169,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -6494,6 +7180,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -6504,6 +7191,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -6514,6 +7202,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -6524,6 +7213,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -6534,6 +7224,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -6544,6 +7235,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -6554,6 +7246,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -6564,6 +7257,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -6574,6 +7268,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -6584,6 +7279,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -6594,6 +7290,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -6604,6 +7301,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -6614,6 +7312,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
@@ -6624,6 +7323,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
@@ -6634,6 +7334,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
@@ -6644,6 +7345,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -6654,6 +7356,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
@@ -6664,6 +7367,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
@@ -6674,6 +7378,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
@@ -6684,6 +7389,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
@@ -6694,6 +7400,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
@@ -6704,6 +7411,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
@@ -6714,6 +7422,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
@@ -6724,6 +7433,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
@@ -6734,6 +7444,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -6744,6 +7455,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
@@ -6754,6 +7466,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
@@ -6764,6 +7477,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -6774,6 +7488,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
@@ -6784,6 +7499,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
@@ -6794,6 +7510,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
@@ -6804,6 +7521,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
@@ -6814,6 +7532,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
@@ -6824,6 +7543,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
@@ -6834,6 +7554,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
@@ -6844,6 +7565,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
@@ -6854,6 +7576,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
@@ -6864,6 +7587,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "2",
@@ -6873,6 +7597,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -6881,6 +7606,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -6889,6 +7615,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
@@ -6897,6 +7624,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_TIMER",
"SampleAfterValue": "2000003",
@@ -6904,6 +7632,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -6912,6 +7641,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Number of times RTM commit succeeded.",
@@ -6920,6 +7650,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
@@ -6928,6 +7659,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
@@ -6935,6 +7667,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -6943,6 +7676,7 @@
},
{
"BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -6951,6 +7685,7 @@
},
{
"BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC4",
"PublicDescription": "RTM region detected inside HLE.",
@@ -6959,6 +7694,7 @@
},
{
"BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC5",
"PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
@@ -6967,6 +7703,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY",
"SampleAfterValue": "2000003",
@@ -6974,6 +7711,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Number of times a TSX line had a cache conflict.",
@@ -6982,6 +7720,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
@@ -6990,6 +7729,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
@@ -6998,6 +7738,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
@@ -7006,6 +7747,7 @@
},
{
"BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
@@ -7014,6 +7756,7 @@
},
{
"BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"PublicDescription": "Number of times we could not allocate Lock Buffer.",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json b/tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json
index 904d299c95a3..cccfcab3425e 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/metricgroups.json
@@ -5,7 +5,20 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
index 95d42ac36717..f25693b17b8b 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Core cycles the core was throttled due to a pending power level request.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.THROTTLE",
"PublicDescription": "Core cycles the out-of-order engine was throttled due to a pending power level request.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
"SampleAfterValue": "2000003",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
"SampleAfterValue": "2000003",
@@ -47,6 +53,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
"SampleAfterValue": "2000003",
@@ -54,6 +61,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
"SampleAfterValue": "2000003",
@@ -61,6 +69,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
"SampleAfterValue": "2000003",
@@ -68,6 +77,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
"SampleAfterValue": "2000003",
@@ -75,6 +85,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
"SampleAfterValue": "2000003",
@@ -82,6 +93,7 @@
},
{
"BriefDescription": "Number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.RECEIVED",
"PublicDescription": "Counts the number of hardware interruptions received by the processor.",
@@ -90,6 +102,7 @@
},
{
"BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
+ "Counter": "0,1,2,3",
"EventCode": "0xFE",
"EventName": "IDI_MISC.WB_DOWNGRADE",
"PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
+ "Counter": "0,1,2,3",
"EventCode": "0xFE",
"EventName": "IDI_MISC.WB_UPGRADE",
"PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
@@ -106,6 +120,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.ANY_RESPONSE have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -115,6 +130,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -124,6 +140,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -133,6 +150,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -142,6 +160,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -151,6 +170,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -160,6 +180,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -178,6 +200,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -187,6 +210,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -196,6 +220,7 @@
},
{
"BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -205,6 +230,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.ANY_RESPONSE have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -214,6 +240,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -223,6 +250,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -232,6 +260,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -241,6 +270,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -250,6 +280,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -259,6 +290,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -268,6 +300,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -277,6 +310,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -286,6 +320,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -295,6 +330,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -304,6 +340,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.ANY_RESPONSE have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -313,6 +350,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -322,6 +360,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -331,6 +370,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -340,6 +380,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -349,6 +390,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -358,6 +400,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -367,6 +410,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -376,6 +420,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -385,6 +430,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -394,6 +440,7 @@
},
{
"BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -403,6 +450,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.ANY_RESPONSE have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -412,6 +460,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -421,6 +470,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -430,6 +480,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -439,6 +490,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -448,6 +500,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -457,6 +510,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -466,6 +520,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -475,6 +530,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -484,6 +540,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -493,6 +550,7 @@
},
{
"BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -502,6 +560,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.ANY_RESPONSE have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -511,6 +570,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -520,6 +580,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -529,6 +590,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -538,6 +600,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -547,6 +610,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -556,6 +620,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -565,6 +630,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -574,6 +640,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -583,6 +650,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -592,6 +660,7 @@
},
{
"BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -601,6 +670,7 @@
},
{
"BriefDescription": "Counts all demand code reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -610,6 +680,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -619,6 +690,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -628,6 +700,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -637,6 +710,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -646,6 +720,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -655,6 +730,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -664,6 +740,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -673,6 +750,7 @@
},
{
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -682,6 +760,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -691,6 +770,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -700,6 +780,7 @@
},
{
"BriefDescription": "Counts demand data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -709,6 +790,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -718,6 +800,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -727,6 +810,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -736,6 +820,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -745,6 +830,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -754,6 +840,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -763,6 +850,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -772,6 +860,7 @@
},
{
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -781,6 +870,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -790,6 +880,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -799,6 +890,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -808,6 +900,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -817,6 +910,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -826,6 +920,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -835,6 +930,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -844,6 +940,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -853,6 +950,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -862,6 +960,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -871,6 +970,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -880,6 +980,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -889,6 +990,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -898,6 +1000,7 @@
},
{
"BriefDescription": "Counts any other requests have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -907,6 +1010,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -916,6 +1020,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -925,6 +1030,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -934,6 +1040,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -943,6 +1050,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -952,6 +1060,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -961,6 +1070,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -970,6 +1080,7 @@
},
{
"BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -979,6 +1090,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -988,6 +1100,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -997,6 +1110,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1006,6 +1120,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1015,6 +1130,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1024,6 +1140,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1033,6 +1150,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1042,6 +1160,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1051,6 +1170,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1060,6 +1180,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1069,6 +1190,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1078,6 +1200,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1087,6 +1210,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1096,6 +1220,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1105,6 +1230,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1114,6 +1240,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1123,6 +1250,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1132,6 +1260,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1141,6 +1270,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1150,6 +1280,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1159,6 +1290,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1168,6 +1300,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1177,6 +1310,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1186,6 +1320,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1195,6 +1330,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1204,6 +1340,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1213,6 +1350,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1222,6 +1360,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1231,6 +1370,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1240,6 +1380,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1249,6 +1390,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1258,6 +1400,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1267,6 +1410,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1276,6 +1420,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1285,6 +1430,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1294,6 +1440,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1303,6 +1450,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1312,6 +1460,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1321,6 +1470,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1330,6 +1480,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1339,6 +1490,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1348,6 +1500,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1357,6 +1510,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1366,6 +1520,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1375,6 +1530,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1384,6 +1540,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1393,6 +1550,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1402,6 +1560,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1411,6 +1570,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1420,6 +1580,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1429,6 +1590,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1438,6 +1600,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1447,6 +1610,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1456,6 +1620,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1465,6 +1630,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1474,6 +1640,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1483,6 +1650,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
index c50ddf5b40dd..3dd296ab4d78 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_ACTIVE",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired. [This event is alias to BR_INST_RETIRED.CONDITIONAL]",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.COND",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired. [This event is alias to BR_INST_RETIRED.COND]",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
@@ -65,6 +72,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
@@ -75,6 +83,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
@@ -85,6 +94,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
@@ -95,6 +105,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
@@ -104,6 +115,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
@@ -112,6 +124,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -120,6 +133,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
@@ -127,6 +141,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -136,6 +151,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -145,6 +161,7 @@
},
{
"BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -154,6 +171,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -162,6 +180,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -171,6 +190,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "25003",
@@ -178,6 +198,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"SampleAfterValue": "25003",
@@ -186,6 +207,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "25003",
@@ -193,6 +215,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "25003",
@@ -200,6 +223,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -207,6 +231,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "25003",
@@ -215,6 +240,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "25003",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x3C",
@@ -231,6 +258,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -239,12 +267,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -253,12 +283,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -267,6 +299,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -275,6 +308,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -283,6 +317,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -291,6 +326,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -299,6 +335,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "20",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -307,6 +344,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -315,6 +353,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -323,6 +362,7 @@
},
{
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -331,6 +371,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -339,6 +380,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -347,6 +389,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"SampleAfterValue": "2000003",
@@ -354,6 +397,7 @@
},
{
"BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -362,6 +406,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
@@ -370,6 +415,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -378,6 +424,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
@@ -385,6 +432,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
@@ -393,15 +441,17 @@
},
{
"BriefDescription": "Number of all retired NOP instructions.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.NOP",
- "PEBS": "2",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
@@ -412,6 +462,7 @@
},
{
"BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "Counter": "0,2,3",
"CounterMask": "10",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
@@ -424,6 +475,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -434,6 +486,7 @@
},
{
"BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"SampleAfterValue": "2000003",
@@ -441,6 +494,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -450,6 +504,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
@@ -457,6 +512,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -465,6 +521,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -473,6 +530,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
@@ -481,6 +539,7 @@
},
{
"BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -489,6 +548,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_OK]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -498,6 +558,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -507,6 +568,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_4_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_OK",
@@ -516,6 +578,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -524,6 +587,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -533,6 +597,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -541,6 +606,7 @@
},
{
"BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY",
"SampleAfterValue": "100003",
@@ -548,6 +614,7 @@
},
{
"BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
"PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
@@ -556,6 +623,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "Counts resource-related stall cycles.",
@@ -564,6 +632,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -572,6 +641,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
@@ -580,6 +650,7 @@
},
{
"BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.PAUSE_INST",
"SampleAfterValue": "2000003",
@@ -587,6 +658,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
@@ -595,6 +667,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -606,6 +679,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
@@ -614,6 +688,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
@@ -622,6 +697,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
@@ -630,6 +706,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
@@ -638,6 +715,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
@@ -646,6 +724,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
@@ -654,6 +733,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
@@ -662,6 +742,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
@@ -670,6 +751,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Number of uops executed from any thread.",
@@ -678,6 +760,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -686,6 +769,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -694,6 +778,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -702,6 +787,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -710,6 +796,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
@@ -719,6 +806,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
@@ -728,6 +816,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
@@ -737,6 +826,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
@@ -746,6 +836,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
@@ -755,6 +846,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -765,6 +857,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.THREAD",
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
@@ -773,6 +866,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -781,6 +875,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -789,6 +884,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"SampleAfterValue": "2000003",
@@ -796,6 +892,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -806,6 +903,7 @@
},
{
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
@@ -814,6 +912,7 @@
},
{
"BriefDescription": "Number of macro-fused uops retired. (non precise)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
"PublicDescription": "Counts the number of macro-fused uops retired. (non precise)",
@@ -822,6 +921,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PublicDescription": "Counts the retirement slots used.",
@@ -830,6 +930,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -840,6 +941,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
index 2c880535cc82..c9596e18ec09 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_READ",
"Filter": "config1=0x40040e33",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_WRITE",
"Filter": "config1=0x40041e33",
@@ -21,6 +23,7 @@
},
{
"BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.UNCACHEABLE",
"Filter": "config1=0x40e33",
@@ -31,6 +34,7 @@
},
{
"BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_FULL",
"Filter": "config1=0x41833",
@@ -42,6 +46,7 @@
},
{
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
"Filter": "config1=0x41a33",
@@ -53,8 +58,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -62,8 +69,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -71,8 +80,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -80,8 +91,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -89,8 +102,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -98,8 +113,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -107,8 +124,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -116,8 +135,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -125,8 +146,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -134,8 +157,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -143,8 +168,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -152,8 +179,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -161,8 +190,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -170,8 +201,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -179,8 +212,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -188,8 +223,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -197,8 +234,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -206,8 +245,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -215,8 +256,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -224,8 +267,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -233,8 +278,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -242,8 +289,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -251,8 +300,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -260,8 +311,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -269,8 +322,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -278,8 +333,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -287,8 +344,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -296,8 +355,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -305,8 +366,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -314,8 +377,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -323,8 +388,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -332,8 +399,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -341,8 +410,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -350,8 +421,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -359,8 +432,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -368,8 +443,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -377,8 +454,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -386,8 +465,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -395,8 +476,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -404,8 +487,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -413,8 +498,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -422,8 +509,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -431,8 +520,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -440,8 +531,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -449,8 +542,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -458,8 +553,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -467,8 +564,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -476,8 +575,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -485,8 +586,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass; Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the intermediate bypass.",
"UMask": "0x2",
@@ -494,8 +597,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
"UMask": "0x4",
@@ -503,8 +608,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the full bypass.",
"UMask": "0x1",
@@ -512,6 +619,7 @@
},
{
"BriefDescription": "Uncore cache clock ticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_CHA_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts clockticks of the clock controlling the uncore caching and home agent (CHA).",
@@ -519,55 +627,69 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; C1 State",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.C1_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; C1 Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; C6 State",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.C6_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; C6 Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.GV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Core Cross Snoops Issued; Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xe2",
@@ -575,8 +697,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Any Single Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xe1",
@@ -584,8 +708,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Any Snoop to Remote Node",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xe4",
@@ -593,6 +719,7 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
"PerPkg": "1",
@@ -602,8 +729,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Single Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x41",
@@ -611,8 +740,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Core Request to Remote Node",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x44",
@@ -620,6 +751,7 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
"PerPkg": "1",
@@ -629,8 +761,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Single Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x81",
@@ -638,8 +772,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Eviction to Remote Node",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x84",
@@ -647,8 +783,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Multiple External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x22",
@@ -656,8 +794,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Single External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x21",
@@ -665,8 +805,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; External Snoop to Remote Node",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x24",
@@ -674,14 +816,17 @@
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
"Unit": "CHA"
},
{
"BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
"PerPkg": "1",
@@ -691,6 +836,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.SNP",
"PerPkg": "1",
@@ -700,6 +846,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.HA",
"PerPkg": "1",
@@ -709,6 +856,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.TOR",
"PerPkg": "1",
@@ -718,8 +866,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -727,8 +877,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -736,6 +888,7 @@
},
{
"BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
"PerPkg": "1",
@@ -745,8 +898,10 @@
},
{
"BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_FAST_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x1",
@@ -754,6 +909,7 @@
},
{
"BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.EX_RDS",
"PerPkg": "1",
@@ -763,80 +919,100 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache; No SF/LLC HitS/F and op is RdInvOwn",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache; SF/LLC HitS/F and op is RdInvOwn",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Deallocate HitME$ on Reads without RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Received RspFwdI* for a local request, but converted HitME$ to SF entry",
"UMask": "0x1",
@@ -844,16 +1020,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
"UMask": "0x2",
@@ -861,16 +1041,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache to SHARed",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -878,8 +1062,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -887,8 +1073,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -896,8 +1084,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -905,8 +1095,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -914,8 +1106,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -923,8 +1117,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -932,8 +1128,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -941,8 +1139,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -950,8 +1150,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -959,8 +1161,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -968,8 +1172,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -977,8 +1183,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -986,8 +1194,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -995,6 +1205,7 @@
},
{
"BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
"PerPkg": "1",
@@ -1004,8 +1215,10 @@
},
{
"BriefDescription": "HA to iMC Reads Issued; ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
"UMask": "0x2",
@@ -1013,6 +1226,7 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
"PerPkg": "1",
@@ -1022,8 +1236,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; Full Line MIG",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
"UMask": "0x10",
@@ -1031,8 +1247,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
"UMask": "0x4",
@@ -1040,8 +1258,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
"UMask": "0x2",
@@ -1049,8 +1269,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; Partial MIG",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.; Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -1058,8 +1280,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
"UMask": "0x8",
@@ -1067,64 +1291,80 @@
},
{
"BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_CHA_IODC_ALLOC.INVITOM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations dropped due to IODC Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_CHA_IODC_ALLOC.IODCFULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IDOC allocation dropped due to OSB gate",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_CHA_IODC_ALLOC.OSBGATED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to any reason",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to conflicting transaction",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.SNPOUT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbPushMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Moved to Cbo section",
"UMask": "0x4",
@@ -1132,8 +1372,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x11",
@@ -1141,8 +1383,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Read transactions",
"UMask": "0x3",
@@ -1150,8 +1394,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
"UMask": "0x31",
@@ -1159,8 +1405,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
"UMask": "0x91",
@@ -1168,8 +1416,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
"UMask": "0x9",
@@ -1177,8 +1427,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
"UMask": "0x5",
@@ -1186,35 +1438,43 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.F_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized; Local - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2f",
@@ -1222,8 +1482,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - Lines in E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x22",
@@ -1231,8 +1493,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - Lines in F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x28",
@@ -1240,8 +1504,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - Lines in M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x21",
@@ -1249,8 +1515,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x24",
@@ -1258,26 +1526,32 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized; Remote - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8f",
@@ -1285,8 +1559,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - Lines in E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x82",
@@ -1294,8 +1570,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - Lines in F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x88",
@@ -1303,8 +1581,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - Lines in M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x81",
@@ -1312,8 +1592,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x84",
@@ -1321,15 +1603,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
"PerPkg": "1",
@@ -1339,6 +1624,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
"PerPkg": "1",
@@ -1348,6 +1634,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
"PerPkg": "1",
@@ -1357,6 +1644,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
"PerPkg": "1",
@@ -1366,8 +1654,10 @@
},
{
"BriefDescription": "Cbo Misc; CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous events in the Cbo.",
"UMask": "0x20",
@@ -1375,8 +1665,10 @@
},
{
"BriefDescription": "Cbo Misc; CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous events in the Cbo.",
"UMask": "0x10",
@@ -1384,6 +1676,7 @@
},
{
"BriefDescription": "Number of times that an RFO hit in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -1393,8 +1686,10 @@
},
{
"BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
"UMask": "0x1",
@@ -1402,8 +1697,10 @@
},
{
"BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
"UMask": "0x2",
@@ -1411,16 +1708,20 @@
},
{
"BriefDescription": "OSB Snoop Broadcast",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"Unit": "CHA"
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in IODC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.IODC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "2LM related events; Counts the number of times CHA saw NM Set conflict in IODC",
"UMask": "0x10",
@@ -1428,8 +1729,10 @@
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "NM evictions due to another read to the same near memory set in the LLC.",
"UMask": "0x2",
@@ -1437,8 +1740,10 @@
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "NM evictions due to another read to the same near memory set in the SF.",
"UMask": "0x1",
@@ -1446,8 +1751,10 @@
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in TOR",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Reject in the CHA due to a pending read to the same near memory set in the TOR.",
"UMask": "0x4",
@@ -1455,8 +1762,10 @@
},
{
"BriefDescription": "Memory mode related events; Counts the number of times CHA saw NM Set conflict in TOR and the transaction was rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR_REJECT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Rejects in the CHA due to a pending read to the same near memory set in the TOR.",
"UMask": "0x8",
@@ -1464,8 +1773,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC0_SMI2",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -1473,8 +1784,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC1_SMI3",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -1482,8 +1795,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC2_SMI4",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -1491,8 +1806,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC3_SMI5",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -1500,8 +1817,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; MC0_SMI0",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -1509,8 +1828,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; MC1_SMI1",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -1518,6 +1839,7 @@
},
{
"BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -1527,6 +1849,7 @@
},
{
"BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -1536,6 +1859,7 @@
},
{
"BriefDescription": "Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS",
"PerPkg": "1",
@@ -1545,6 +1869,7 @@
},
{
"BriefDescription": "Read requests from a unit on this socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -1554,6 +1879,7 @@
},
{
"BriefDescription": "Read requests from a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -1563,6 +1889,7 @@
},
{
"BriefDescription": "Write requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES",
"PerPkg": "1",
@@ -1572,6 +1899,7 @@
},
{
"BriefDescription": "Write Requests from a unit on this socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -1581,6 +1909,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -1590,8 +1919,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -1599,8 +1930,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -1608,8 +1941,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -1617,8 +1952,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -1626,8 +1963,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -1635,8 +1974,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -1644,8 +1985,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -1653,8 +1996,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -1662,87 +2007,109 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Allocations; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x4",
@@ -1750,6 +2117,7 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ",
"PerPkg": "1",
@@ -1759,8 +2127,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x2",
@@ -1768,8 +2138,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x10",
@@ -1777,8 +2149,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x20",
@@ -1786,8 +2160,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; RRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x40",
@@ -1795,8 +2171,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; WBQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x80",
@@ -1804,238 +2182,297 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
"PerPkg": "1",
@@ -2044,24 +2481,30 @@
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "ISMQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x1",
@@ -2069,8 +2512,10 @@
},
{
"BriefDescription": "ISMQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2078,8 +2523,10 @@
},
{
"BriefDescription": "ISMQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x40",
@@ -2087,8 +2534,10 @@
},
{
"BriefDescription": "ISMQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x10",
@@ -2096,8 +2545,10 @@
},
{
"BriefDescription": "ISMQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x20",
@@ -2105,8 +2556,10 @@
},
{
"BriefDescription": "ISMQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x4",
@@ -2114,8 +2567,10 @@
},
{
"BriefDescription": "ISMQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x8",
@@ -2123,8 +2578,10 @@
},
{
"BriefDescription": "ISMQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x80",
@@ -2132,8 +2589,10 @@
},
{
"BriefDescription": "ISMQ Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x1",
@@ -2141,8 +2600,10 @@
},
{
"BriefDescription": "ISMQ Retries; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2150,8 +2611,10 @@
},
{
"BriefDescription": "ISMQ Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x40",
@@ -2159,8 +2622,10 @@
},
{
"BriefDescription": "ISMQ Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x10",
@@ -2168,8 +2633,10 @@
},
{
"BriefDescription": "ISMQ Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x20",
@@ -2177,8 +2644,10 @@
},
{
"BriefDescription": "ISMQ Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x4",
@@ -2186,8 +2655,10 @@
},
{
"BriefDescription": "ISMQ Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x8",
@@ -2195,8 +2666,10 @@
},
{
"BriefDescription": "ISMQ Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x80",
@@ -2204,8 +2677,10 @@
},
{
"BriefDescription": "ISMQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x1",
@@ -2213,8 +2688,10 @@
},
{
"BriefDescription": "ISMQ Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2222,8 +2699,10 @@
},
{
"BriefDescription": "ISMQ Retries; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x1",
@@ -2231,8 +2710,10 @@
},
{
"BriefDescription": "ISMQ Retries; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2240,8 +2721,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x4",
@@ -2249,6 +2732,7 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -2258,8 +2742,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy; RRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x40",
@@ -2267,8 +2753,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy; WBQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x80",
@@ -2276,8 +2764,10 @@
},
{
"BriefDescription": "Other Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x1",
@@ -2285,8 +2775,10 @@
},
{
"BriefDescription": "Other Retries; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x2",
@@ -2294,8 +2786,10 @@
},
{
"BriefDescription": "Other Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x40",
@@ -2303,8 +2797,10 @@
},
{
"BriefDescription": "Other Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x10",
@@ -2312,8 +2808,10 @@
},
{
"BriefDescription": "Other Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x20",
@@ -2321,8 +2819,10 @@
},
{
"BriefDescription": "Other Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x4",
@@ -2330,8 +2830,10 @@
},
{
"BriefDescription": "Other Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x8",
@@ -2339,8 +2841,10 @@
},
{
"BriefDescription": "Other Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x80",
@@ -2348,8 +2852,10 @@
},
{
"BriefDescription": "Other Retries; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x40",
@@ -2357,8 +2863,10 @@
},
{
"BriefDescription": "Other Retries; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x1",
@@ -2366,8 +2874,10 @@
},
{
"BriefDescription": "Other Retries; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x2",
@@ -2375,8 +2885,10 @@
},
{
"BriefDescription": "Other Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x20",
@@ -2384,8 +2896,10 @@
},
{
"BriefDescription": "Other Retries; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x4",
@@ -2393,8 +2907,10 @@
},
{
"BriefDescription": "Other Retries; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x80",
@@ -2402,8 +2918,10 @@
},
{
"BriefDescription": "Other Retries; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x8",
@@ -2411,8 +2929,10 @@
},
{
"BriefDescription": "Other Retries; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x10",
@@ -2420,136 +2940,170 @@
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Request Queue Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x1",
@@ -2557,8 +3111,10 @@
},
{
"BriefDescription": "Request Queue Retries; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x2",
@@ -2566,8 +3122,10 @@
},
{
"BriefDescription": "Request Queue Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x40",
@@ -2575,8 +3133,10 @@
},
{
"BriefDescription": "Request Queue Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x10",
@@ -2584,8 +3144,10 @@
},
{
"BriefDescription": "Request Queue Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x20",
@@ -2593,8 +3155,10 @@
},
{
"BriefDescription": "Request Queue Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x4",
@@ -2602,8 +3166,10 @@
},
{
"BriefDescription": "Request Queue Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x8",
@@ -2611,8 +3177,10 @@
},
{
"BriefDescription": "Request Queue Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x80",
@@ -2620,8 +3188,10 @@
},
{
"BriefDescription": "Request Queue Retries; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x40",
@@ -2629,8 +3199,10 @@
},
{
"BriefDescription": "Request Queue Retries; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x1",
@@ -2638,8 +3210,10 @@
},
{
"BriefDescription": "Request Queue Retries; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x2",
@@ -2647,8 +3221,10 @@
},
{
"BriefDescription": "Request Queue Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x20",
@@ -2656,8 +3232,10 @@
},
{
"BriefDescription": "Request Queue Retries; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x4",
@@ -2665,8 +3243,10 @@
},
{
"BriefDescription": "Request Queue Retries; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x80",
@@ -2674,8 +3254,10 @@
},
{
"BriefDescription": "Request Queue Retries; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x8",
@@ -2683,8 +3265,10 @@
},
{
"BriefDescription": "Request Queue Retries; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x10",
@@ -2692,8 +3276,10 @@
},
{
"BriefDescription": "RRQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x1",
@@ -2701,8 +3287,10 @@
},
{
"BriefDescription": "RRQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x2",
@@ -2710,8 +3298,10 @@
},
{
"BriefDescription": "RRQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x40",
@@ -2719,8 +3309,10 @@
},
{
"BriefDescription": "RRQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x10",
@@ -2728,8 +3320,10 @@
},
{
"BriefDescription": "RRQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x20",
@@ -2737,8 +3331,10 @@
},
{
"BriefDescription": "RRQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x4",
@@ -2746,8 +3342,10 @@
},
{
"BriefDescription": "RRQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x8",
@@ -2755,8 +3353,10 @@
},
{
"BriefDescription": "RRQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x80",
@@ -2764,8 +3364,10 @@
},
{
"BriefDescription": "RRQ Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x40",
@@ -2773,8 +3375,10 @@
},
{
"BriefDescription": "RRQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x1",
@@ -2782,8 +3386,10 @@
},
{
"BriefDescription": "RRQ Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x2",
@@ -2791,8 +3397,10 @@
},
{
"BriefDescription": "RRQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x20",
@@ -2800,8 +3408,10 @@
},
{
"BriefDescription": "RRQ Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x4",
@@ -2809,8 +3419,10 @@
},
{
"BriefDescription": "RRQ Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x80",
@@ -2818,8 +3430,10 @@
},
{
"BriefDescription": "RRQ Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x8",
@@ -2827,8 +3441,10 @@
},
{
"BriefDescription": "RRQ Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x10",
@@ -2836,8 +3452,10 @@
},
{
"BriefDescription": "WBQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x1",
@@ -2845,8 +3463,10 @@
},
{
"BriefDescription": "WBQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x2",
@@ -2854,8 +3474,10 @@
},
{
"BriefDescription": "WBQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x40",
@@ -2863,8 +3485,10 @@
},
{
"BriefDescription": "WBQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x10",
@@ -2872,8 +3496,10 @@
},
{
"BriefDescription": "WBQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x20",
@@ -2881,8 +3507,10 @@
},
{
"BriefDescription": "WBQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x4",
@@ -2890,8 +3518,10 @@
},
{
"BriefDescription": "WBQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x8",
@@ -2899,8 +3529,10 @@
},
{
"BriefDescription": "WBQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x80",
@@ -2908,8 +3540,10 @@
},
{
"BriefDescription": "WBQ Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x40",
@@ -2917,8 +3551,10 @@
},
{
"BriefDescription": "WBQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x1",
@@ -2926,8 +3562,10 @@
},
{
"BriefDescription": "WBQ Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x2",
@@ -2935,8 +3573,10 @@
},
{
"BriefDescription": "WBQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x20",
@@ -2944,8 +3584,10 @@
},
{
"BriefDescription": "WBQ Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x4",
@@ -2953,8 +3595,10 @@
},
{
"BriefDescription": "WBQ Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x80",
@@ -2962,8 +3606,10 @@
},
{
"BriefDescription": "WBQ Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x8",
@@ -2971,8 +3617,10 @@
},
{
"BriefDescription": "WBQ Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x10",
@@ -2980,8 +3628,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -2989,8 +3639,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -2998,8 +3650,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -3007,8 +3661,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -3016,8 +3672,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -3025,8 +3683,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -3034,8 +3694,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -3043,8 +3705,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -3052,8 +3716,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -3061,8 +3727,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -3070,8 +3738,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -3079,8 +3749,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -3088,8 +3760,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -3097,8 +3771,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -3106,8 +3782,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -3115,8 +3793,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -3124,8 +3804,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -3133,8 +3815,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3142,8 +3826,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3151,8 +3837,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3160,8 +3848,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3169,8 +3859,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -3178,8 +3870,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3187,8 +3881,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3196,8 +3892,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3205,8 +3903,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3214,8 +3914,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3223,8 +3925,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -3232,8 +3936,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3241,6 +3947,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.E_STATE",
"PerPkg": "1",
@@ -3250,6 +3957,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.M_STATE",
"PerPkg": "1",
@@ -3259,6 +3967,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.S_STATE",
"PerPkg": "1",
@@ -3268,8 +3977,10 @@
},
{
"BriefDescription": "Snoops Sent; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.",
"UMask": "0x1",
@@ -3277,8 +3988,10 @@
},
{
"BriefDescription": "Snoops Sent; Broadcast snoop for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from local sockets.",
"UMask": "0x10",
@@ -3286,8 +3999,10 @@
},
{
"BriefDescription": "Snoops Sent; Broadcast snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast snoops issued by the HA.This filter includes only requests coming from remote sockets.",
"UMask": "0x20",
@@ -3295,8 +4010,10 @@
},
{
"BriefDescription": "Snoops Sent; Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of directed snoops issued by the HA. This filter includes only requests coming from local sockets.",
"UMask": "0x40",
@@ -3304,8 +4021,10 @@
},
{
"BriefDescription": "Snoops Sent; Directed snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of directed snoops issued by the HA. This filter includes only requests coming from remote sockets.",
"UMask": "0x80",
@@ -3313,8 +4032,10 @@
},
{
"BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the local socket.",
"UMask": "0x4",
@@ -3322,8 +4043,10 @@
},
{
"BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the remote socket.",
"UMask": "0x8",
@@ -3331,6 +4054,7 @@
},
{
"BriefDescription": "RspCnflct* Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
"PerPkg": "1",
@@ -3340,8 +4064,10 @@
},
{
"BriefDescription": "Snoop Responses Received; RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -3349,6 +4075,7 @@
},
{
"BriefDescription": "RspI Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPI",
"PerPkg": "1",
@@ -3358,6 +4085,7 @@
},
{
"BriefDescription": "RspIFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
@@ -3367,8 +4095,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : RspS : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -3376,6 +4106,7 @@
},
{
"BriefDescription": "RspSFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
@@ -3385,6 +4116,7 @@
},
{
"BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
@@ -3394,6 +4126,7 @@
},
{
"BriefDescription": "Rsp*WB Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
"PerPkg": "1",
@@ -3403,8 +4136,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -3412,8 +4147,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -3421,8 +4158,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
"UMask": "0x1",
@@ -3430,8 +4169,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
@@ -3439,8 +4180,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -3448,8 +4191,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
"UMask": "0x8",
@@ -3457,8 +4202,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -3466,8 +4213,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -3475,8 +4224,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3484,8 +4235,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3493,8 +4246,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3502,8 +4257,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3511,8 +4268,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3520,8 +4279,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3529,8 +4290,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3538,8 +4301,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3547,8 +4312,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3556,8 +4323,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3565,8 +4334,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3574,8 +4345,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3583,8 +4356,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3592,8 +4367,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3601,8 +4378,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3610,8 +4389,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3619,8 +4400,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3628,8 +4411,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3637,8 +4422,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3646,8 +4433,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3655,8 +4444,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3664,8 +4455,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3673,8 +4466,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3682,8 +4477,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3691,8 +4488,10 @@
},
{
"BriefDescription": "TOR Inserts; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0xff",
@@ -3700,8 +4499,10 @@
},
{
"BriefDescription": "TOR Inserts; Hits from Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x15",
@@ -3709,8 +4510,10 @@
},
{
"BriefDescription": "TOR Inserts; All from Local iA and IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL_IO_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally initiated requests",
"UMask": "0x35",
@@ -3718,8 +4521,10 @@
},
{
"BriefDescription": "TOR Inserts; Misses from Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x25",
@@ -3727,8 +4532,10 @@
},
{
"BriefDescription": "TOR Inserts; SF/LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -3736,8 +4543,10 @@
},
{
"BriefDescription": "TOR Inserts; Hit (Not a Miss)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; HITs (hit is defined to be not a miss [see below], as a result for any request allocated into the TOR, one of either HIT or MISS must be true)",
"UMask": "0x10",
@@ -3745,6 +4554,7 @@
},
{
"BriefDescription": "TOR Inserts; All from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA",
"PerPkg": "1",
@@ -3754,6 +4564,7 @@
},
{
"BriefDescription": "TOR Inserts; Hits from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
"PerPkg": "1",
@@ -3763,6 +4574,7 @@
},
{
"BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
"Filter": "config1=0x40233",
@@ -3773,6 +4585,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
"Filter": "config1=0x40433",
@@ -3783,6 +4596,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
"Filter": "config1=0x4b233",
@@ -3792,6 +4606,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
"Filter": "config1=0x4b433",
@@ -3801,6 +4616,7 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO",
"Filter": "config1=0x4b033",
@@ -3811,6 +4627,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
"Filter": "config1=0x40033",
@@ -3821,6 +4638,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
"PerPkg": "1",
@@ -3830,6 +4648,7 @@
},
{
"BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
"Filter": "config1=0x40233",
@@ -3840,6 +4659,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
"Filter": "config1=0x40433",
@@ -3850,6 +4670,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
"Filter": "config1=0x4b233",
@@ -3859,6 +4680,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
"Filter": "config1=0x4b433",
@@ -3868,6 +4690,7 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO",
"Filter": "config1=0x4b033",
@@ -3878,6 +4701,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
"Filter": "config1=0x40033",
@@ -3888,8 +4712,10 @@
},
{
"BriefDescription": "TOR Inserts; All from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally generated IO traffic",
"UMask": "0x34",
@@ -3897,6 +4723,7 @@
},
{
"BriefDescription": "TOR Inserts; Hits from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
"PerPkg": "1",
@@ -3906,6 +4733,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
"PerPkg": "1",
@@ -3915,8 +4743,10 @@
},
{
"BriefDescription": "TOR Inserts; ItoM misses from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "Experimental": "1",
"Filter": "config1=0x49033",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM request is used by IIO to request a data write without first reading the data for ownership.",
@@ -3925,8 +4755,10 @@
},
{
"BriefDescription": "TOR Inserts; RdCur misses from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR",
+ "Experimental": "1",
"Filter": "config1=0x43C33",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RdCur requests and miss the LLC. A RdCur request is used by IIO to read data without changing state.",
@@ -3935,8 +4767,10 @@
},
{
"BriefDescription": "TOR Inserts; RFO misses from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "Experimental": "1",
"Filter": "config1=0x40033",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests a cache line to be cached in E state with the intent to modify.",
@@ -3945,8 +4779,10 @@
},
{
"BriefDescription": "TOR Inserts; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x8",
@@ -3954,26 +4790,32 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x18",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x1",
@@ -3981,17 +4823,21 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; Misses. (a miss is defined to be any transaction from the IRQ, PRQ, RRQ, IPQ or (in the victim case) the ISMQ, that required the CHA to spawn a new UPI/SMI3 request on the UPI fabric (including UPI snoops and/or any RD/WR to a local memory controller, in the event that the CHA is the home node)). Basically, if the LLC/SF/MLC complex were not able to service the request without involving another agent...it is a miss. If only IDI snoops were required, it is not a miss (that means the SF/MLC com",
"UMask": "0x20",
@@ -3999,8 +4845,10 @@
},
{
"BriefDescription": "TOR Inserts; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x4",
@@ -4008,6 +4856,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
@@ -4017,44 +4866,54 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x60",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.WBQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.WBQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xff",
@@ -4062,8 +4921,10 @@
},
{
"BriefDescription": "TOR Occupancy; All from Local",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All remotely generated requests",
"UMask": "0x37",
@@ -4071,8 +4932,10 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from Local",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x17",
@@ -4080,8 +4943,10 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from Local",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x27",
@@ -4089,8 +4954,10 @@
},
{
"BriefDescription": "TOR Occupancy; SF/LLC Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -4098,8 +4965,10 @@
},
{
"BriefDescription": "TOR Occupancy; Hit (Not a Miss)",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; HITs (hit is defined to be not a miss [see below], as a result for any request allocated into the TOR, one of either HIT or MISS must be true)",
"UMask": "0x10",
@@ -4107,6 +4976,7 @@
},
{
"BriefDescription": "TOR Occupancy; All from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
"PerPkg": "1",
@@ -4116,6 +4986,7 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
"PerPkg": "1",
@@ -4125,6 +4996,7 @@
},
{
"BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
"Filter": "config1=0x40233",
@@ -4135,6 +5007,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
"Filter": "config1=0x40433",
@@ -4145,6 +5018,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
"Filter": "config1=0x4b233",
@@ -4154,6 +5028,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
"Filter": "config1=0x4b433",
@@ -4163,6 +5038,7 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
"Filter": "config1=0x4b033",
@@ -4173,6 +5049,7 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
"Filter": "config1=0x40033",
@@ -4183,6 +5060,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
"PerPkg": "1",
@@ -4192,6 +5070,7 @@
},
{
"BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
"Filter": "config1=0x40233",
@@ -4202,6 +5081,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
"Filter": "config1=0x40433",
@@ -4212,6 +5092,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
"Filter": "config1=0x4b233",
@@ -4221,6 +5102,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
"Filter": "config1=0x4b433",
@@ -4230,6 +5112,7 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
"Filter": "config1=0x4b033",
@@ -4240,6 +5123,7 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
"Filter": "config1=0x40033",
@@ -4250,8 +5134,10 @@
},
{
"BriefDescription": "TOR Occupancy; All from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; All locally generated IO traffic",
"UMask": "0x34",
@@ -4259,8 +5145,10 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x14",
@@ -4268,8 +5156,10 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x24",
@@ -4277,8 +5167,10 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM Misses from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "Experimental": "1",
"Filter": "config1=0x49033",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM is used by IIO to request a data write without first reading the data for ownership.",
@@ -4287,8 +5179,10 @@
},
{
"BriefDescription": "TOR Occupancy; RDCUR misses from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR",
+ "Experimental": "1",
"Filter": "config1=0x43C33",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RdCur requests that miss the LLC. A RdCur request is used by IIO to read data without changing state.",
@@ -4297,8 +5191,10 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Experimental": "1",
"Filter": "config1=0x40033",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests data to be cached in E state with the intent to modify.",
@@ -4307,8 +5203,10 @@
},
{
"BriefDescription": "TOR Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x8",
@@ -4316,26 +5214,32 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x18",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x1",
@@ -4343,17 +5247,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; Miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; Misses. (a miss is defined to be any transaction from the IRQ, PRQ, RRQ, IPQ or (in the victim case) the ISMQ, that required the CHA to spawn a new UPI/SMI3 request on the UPI fabric (including UPI snoops and/or any RD/WR to a local memory controller, in the event that the CHA is the home node)). Basically, if the LLC/SF/MLC complex were not able to service the request without involving another agent...it is a miss. If only IDI snoops were required, it is not a miss (that means the SF/MLC com",
"UMask": "0x20",
@@ -4361,8 +5269,10 @@
},
{
"BriefDescription": "TOR Occupancy; PRQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x4",
@@ -4370,8 +5280,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4379,8 +5291,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4388,8 +5302,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4397,8 +5313,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4406,8 +5324,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4415,8 +5335,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4424,8 +5346,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4433,8 +5357,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4442,8 +5368,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4451,8 +5379,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4460,8 +5390,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -4469,8 +5401,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4478,8 +5412,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4487,8 +5423,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4496,8 +5434,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4505,8 +5445,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4514,8 +5456,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4523,8 +5467,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4532,8 +5478,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4541,8 +5489,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4550,8 +5500,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4559,8 +5511,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4568,8 +5522,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4577,8 +5533,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4586,8 +5544,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4595,8 +5555,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4604,8 +5566,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4613,8 +5577,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4622,8 +5588,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4631,8 +5599,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -4640,8 +5610,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x20",
@@ -4649,8 +5621,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -4658,8 +5632,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -4667,8 +5643,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -4676,8 +5654,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -4685,8 +5665,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4694,8 +5676,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4703,8 +5687,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4712,8 +5698,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4721,8 +5709,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4730,8 +5720,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4739,8 +5731,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -4748,8 +5742,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -4757,8 +5753,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -4766,8 +5764,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -4775,8 +5775,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4784,8 +5786,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4793,8 +5797,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4802,8 +5808,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -4811,8 +5819,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4820,8 +5830,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4829,8 +5841,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4838,8 +5852,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4847,8 +5863,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4856,8 +5874,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -4865,8 +5885,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4874,8 +5896,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4883,8 +5907,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -4892,8 +5918,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4901,8 +5929,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -4910,8 +5940,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4919,8 +5951,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -4928,8 +5962,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -4937,8 +5973,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -4946,8 +5984,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -4955,8 +5995,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4964,8 +6006,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -4973,8 +6017,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4982,8 +6028,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -4991,8 +6039,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5000,8 +6050,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5009,8 +6061,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5018,8 +6072,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5027,8 +6083,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5036,8 +6094,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5045,8 +6105,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5054,8 +6116,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5063,8 +6127,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5072,8 +6138,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5081,8 +6149,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -5090,8 +6160,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -5099,8 +6171,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -5108,8 +6182,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -5117,8 +6193,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -5126,8 +6204,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -5135,8 +6215,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -5144,8 +6226,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5153,8 +6237,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5162,8 +6248,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5171,8 +6259,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5180,8 +6270,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5189,8 +6281,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5198,8 +6292,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5207,8 +6303,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -5216,8 +6314,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -5225,8 +6325,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -5234,8 +6336,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -5243,8 +6347,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -5252,8 +6358,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -5261,8 +6369,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -5270,8 +6380,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; AD REQ Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x4",
@@ -5279,8 +6391,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; AD RSP VN0 Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x8",
@@ -5288,8 +6402,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; BL NCB Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x40",
@@ -5297,8 +6413,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; BL NCS Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x80",
@@ -5306,8 +6424,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; BL RSP Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x10",
@@ -5315,8 +6435,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; BL DRS Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x20",
@@ -5324,8 +6446,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; VN0 Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x2",
@@ -5333,8 +6457,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; VNA Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x1",
@@ -5342,8 +6468,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; AD REQ VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x4",
@@ -5351,8 +6479,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; AD RSP VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x8",
@@ -5360,8 +6490,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCB VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x40",
@@ -5369,6 +6501,7 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCS VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCS",
"PerPkg": "1",
@@ -5378,8 +6511,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL RSP VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x10",
@@ -5387,8 +6522,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL DRS VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x20",
@@ -5396,8 +6533,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; AD VNA Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x1",
@@ -5405,8 +6544,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL VNA Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x2",
@@ -5414,8 +6555,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5423,8 +6566,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5432,8 +6577,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5441,8 +6588,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5450,8 +6599,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5459,8 +6610,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5468,8 +6621,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5477,8 +6632,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5486,8 +6643,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5495,8 +6654,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5504,8 +6665,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5513,8 +6676,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5522,8 +6687,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -5531,8 +6698,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -5540,8 +6709,10 @@
},
{
"BriefDescription": "WbPushMtoI; Pushed to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was received WbPushMtoI; Counts the number of times when the CHA was able to push WbPushMToI to LLC",
"UMask": "0x1",
@@ -5549,8 +6720,10 @@
},
{
"BriefDescription": "WbPushMtoI; Pushed to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was received WbPushMtoI; Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
"UMask": "0x2",
@@ -5558,8 +6731,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC0_SMI2",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -5567,8 +6742,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC1_SMI3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -5576,8 +6753,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC2_SMI4",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -5585,8 +6764,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC3_SMI5",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -5594,8 +6775,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC0_SMI0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -5603,8 +6786,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC1_SMI1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -5612,8 +6797,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Any RspIFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response I to Fwd F/E",
"UMask": "0xe4",
@@ -5621,8 +6808,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response I to Fwd M",
"UMask": "0xf0",
@@ -5630,8 +6819,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Any RspSFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response S to Fwd F/E",
"UMask": "0xe2",
@@ -5639,8 +6830,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Any RspSFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response S to Fwd M",
"UMask": "0xe8",
@@ -5648,8 +6841,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Any RspHitFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response any to Hit F/S/E",
"UMask": "0xe1",
@@ -5657,8 +6852,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspIFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response I to Fwd F/E",
"UMask": "0x44",
@@ -5666,8 +6863,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response I to Fwd M",
"UMask": "0x50",
@@ -5675,8 +6874,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspSFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response S to Fwd F/E",
"UMask": "0x42",
@@ -5684,8 +6885,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspSFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response S to Fwd M",
"UMask": "0x48",
@@ -5693,8 +6896,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspHitFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response any to Hit F/S/E",
"UMask": "0x41",
@@ -5702,8 +6907,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response I to Fwd F/E",
"UMask": "0x84",
@@ -5711,8 +6918,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response I to Fwd M",
"UMask": "0x90",
@@ -5720,8 +6929,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response S to Fwd F/E",
"UMask": "0x82",
@@ -5729,8 +6940,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response S to Fwd M",
"UMask": "0x88",
@@ -5738,8 +6951,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspHitFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response any to Hit F/S/E",
"UMask": "0x81",
@@ -5747,8 +6962,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspIFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response I to Fwd F/E",
"UMask": "0x24",
@@ -5756,8 +6973,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response I to Fwd M",
"UMask": "0x30",
@@ -5765,8 +6984,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspSFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response S to Fwd F/E",
"UMask": "0x22",
@@ -5774,8 +6995,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspSFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response S to Fwd M",
"UMask": "0x28",
@@ -5783,8 +7006,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspHitFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response any to Hit F/S/E",
"UMask": "0x21",
@@ -5792,6 +7017,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CLOCKTICKS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
@@ -5799,6 +7025,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_FAST_ASSERTED.HORZ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA5",
"EventName": "UNC_C_FAST_ASSERTED",
@@ -5808,15 +7035,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.ANY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
@@ -5826,24 +7056,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x31",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x91",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
@@ -5853,15 +7088,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.E_STATE",
@@ -5871,6 +7109,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.F_STATE",
@@ -5880,15 +7119,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2f",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
@@ -5898,15 +7140,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.S_STATE",
@@ -5916,59 +7161,72 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SRC_THRTL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA4",
"EventName": "UNC_C_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.EVICT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IPQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x18",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IRQ",
@@ -5978,6 +7236,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
@@ -5987,6 +7246,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
@@ -5996,51 +7256,62 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x31",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x34",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.PRQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
@@ -6050,6 +7321,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
@@ -6059,6 +7331,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REM_ALL",
@@ -6068,87 +7341,106 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.RRQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.RRQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x60",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WBQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WBQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IPQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x18",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IPQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IRQ",
@@ -6158,6 +7450,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IRQ_HIT",
@@ -6167,6 +7460,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IRQ_MISS",
@@ -6176,608 +7470,743 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x31",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x34",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.PRQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.PRQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x57",
"EventName": "UNC_H_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x57",
"EventName": "UNC_H_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x57",
"EventName": "UNC_H_BYPASS_CHA_IMC.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CMS_CLOCKTICKS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_H_CLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C1_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.C1_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.C1_TRANSITION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C6_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.C6_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.C6_TRANSITION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.GV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.GV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.ANY_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_ONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.ANY_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.ANY_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.CORE_GTONE",
@@ -6787,24 +8216,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_ONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.CORE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x41",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.CORE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x44",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EVICT_GTONE",
@@ -6814,59 +8248,72 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EVICT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x81",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EVICT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x84",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EXT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x22",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_ONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EXT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EXT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_COUNTER0_OCCUPANCY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x1F",
"EventName": "UNC_H_COUNTER0_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x53",
"EventName": "UNC_H_DIR_LOOKUP.NO_SNP",
@@ -6876,6 +8323,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x53",
"EventName": "UNC_H_DIR_LOOKUP.SNP",
@@ -6885,6 +8333,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x54",
"EventName": "UNC_H_DIR_UPDATE.HA",
@@ -6894,6 +8343,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.TOR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x54",
"EventName": "UNC_H_DIR_UPDATE.TOR",
@@ -6903,24 +8353,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAE",
"EventName": "UNC_H_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAE",
"EventName": "UNC_H_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.EX_RDS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5F",
"EventName": "UNC_H_HITME_HIT.EX_RDS",
@@ -6930,411 +8385,502 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5F",
"EventName": "UNC_H_HITME_HIT.SHARED_OWNREQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.WBMTOE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5F",
"EventName": "UNC_H_HITME_HIT.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5F",
"EventName": "UNC_H_HITME_HIT.WBMTOI_OR_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_LOOKUP.READ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5E",
"EventName": "UNC_H_HITME_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_LOOKUP.WRITE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5E",
"EventName": "UNC_H_HITME_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x60",
"EventName": "UNC_H_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x60",
"EventName": "UNC_H_HITME_MISS.READ_OR_INV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x60",
"EventName": "UNC_H_HITME_MISS.SHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.DEALLOCATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.RSPFWDI_REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.SHARED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAD",
"EventName": "UNC_H_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAD",
"EventName": "UNC_H_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x59",
"EventName": "UNC_H_IMC_READS_COUNT.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x59",
"EventName": "UNC_H_IMC_READS_COUNT.PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.FULL_MIG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.INVITOM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x62",
"EventName": "UNC_H_IODC_ALLOC.INVITOM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.IODCFULL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x62",
"EventName": "UNC_H_IODC_ALLOC.IODCFULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.OSBGATED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x62",
"EventName": "UNC_H_IODC_ALLOC.OSBGATED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.SNPOUT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.SNPOUT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.WBPUSHMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.CV0_PREF_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.CV0_PREF_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.CV0_PREF_VIC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.CV0_PREF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RFO_HIT_S",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.RFO_HIT_S",
@@ -7344,86 +8890,105 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.RSPI_WAS_FSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.WC_ALIASING",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.WC_ALIASING",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_OSB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x55",
"EventName": "UNC_H_OSB",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.EDC0_SMI2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.EDC1_SMI3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.EDC2_SMI4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.EDC3_SMI5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.MC0_SMI0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.MC1_SMI1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
@@ -7433,6 +8998,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
@@ -7442,6 +9008,7 @@
},
{
"BriefDescription": "read requests from home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.READS",
@@ -7451,6 +9018,7 @@
},
{
"BriefDescription": "read requests from local home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
@@ -7460,15 +9028,18 @@
},
{
"BriefDescription": "read requests from remote home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "write requests from home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.WRITES",
@@ -7478,6 +9049,7 @@
},
{
"BriefDescription": "write requests from local home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
@@ -7487,177 +9059,216 @@
},
{
"BriefDescription": "write requests from remote home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.AD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.AK",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.BL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IPQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.IRQ",
@@ -7667,276 +9278,337 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.IRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.PRQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.PRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.RRQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.WBQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.ANY_IPQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.ANY_REJECT_IRQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.PA_MATCH",
@@ -7946,177 +9618,216 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x25",
"EventName": "UNC_H_RxC_ISMQ1_REJECT.ANY_ISMQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x25",
"EventName": "UNC_H_RxC_ISMQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2D",
"EventName": "UNC_H_RxC_ISMQ1_RETRY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2D",
"EventName": "UNC_H_RxC_ISMQ1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x11",
"EventName": "UNC_H_RxC_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x11",
"EventName": "UNC_H_RxC_OCCUPANCY.IRQ",
@@ -8126,1005 +9837,1228 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x11",
"EventName": "UNC_H_RxC_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x11",
"EventName": "UNC_H_RxC_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.ANY_PRQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.ANY_RRQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.ANY_WBQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB4",
"EventName": "UNC_H_RxR_BUSY_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB4",
"EventName": "UNC_H_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB4",
"EventName": "UNC_H_RxR_BUSY_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB4",
"EventName": "UNC_H_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.IFV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.E_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3D",
"EventName": "UNC_H_SF_EVICTION.E_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.M_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3D",
"EventName": "UNC_H_SF_EVICTION.M_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.S_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3D",
"EventName": "UNC_H_SF_EVICTION.S_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.BCST_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.BCST_REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.DIRECT_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.DIRECT_REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
@@ -9134,24 +11068,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
@@ -9161,15 +11100,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
@@ -9179,6 +11121,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
@@ -9188,1575 +11131,1925 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_FWD_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9B",
"EventName": "UNC_H_TxR_HORZ_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9B",
"EventName": "UNC_H_TxR_HORZ_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9B",
"EventName": "UNC_H_TxR_HORZ_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9B",
"EventName": "UNC_H_TxR_HORZ_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAC",
"EventName": "UNC_H_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAC",
"EventName": "UNC_H_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x56",
"EventName": "UNC_H_WB_PUSH_MTOI.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x56",
"EventName": "UNC_H_WB_PUSH_MTOI.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.EDC0_SMI2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.EDC1_SMI3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.EDC2_SMI4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.EDC3_SMI5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.MC0_SMI0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.MC1_SMI1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf0",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x44",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x42",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x48",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x41",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x84",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x82",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x88",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x81",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x30",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x22",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json
index 3fe9ce483bbe..91889e447bd1 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
"UMask": "0x1",
@@ -10,8 +12,10 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Snoops",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
"UMask": "0x2",
@@ -19,6 +23,7 @@
},
{
"BriefDescription": "Total IRP occupancy of inbound read and write requests.",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
"PerPkg": "1",
@@ -28,15 +33,19 @@
},
{
"BriefDescription": "IRP Clocks",
+ "Counter": "0,1",
"EventCode": "0x1",
"EventName": "UNC_I_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x80",
@@ -44,8 +53,10 @@
},
{
"BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x2",
@@ -53,8 +64,10 @@
},
{
"BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x4",
@@ -62,8 +75,10 @@
},
{
"BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x20",
@@ -71,8 +86,10 @@
},
{
"BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x1",
@@ -80,6 +97,7 @@
},
{
"BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.PCITOM",
"PerPkg": "1",
@@ -89,6 +107,7 @@
},
{
"BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.RFO",
"PerPkg": "1",
@@ -98,8 +117,10 @@
},
{
"BriefDescription": "Coherent Ops; WbMtoI",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x40",
@@ -107,13 +128,16 @@
},
{
"BriefDescription": "FAF RF full",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_FAF_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_FAF_INSERTS",
"PerPkg": "1",
@@ -122,6 +146,7 @@
},
{
"BriefDescription": "Occupancy of the IRP FAF queue.",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_FAF_OCCUPANCY",
"PerPkg": "1",
@@ -130,95 +155,119 @@
},
{
"BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_FAF_TRANSACTIONS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.FAST_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.FAST_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.FAST_XFER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.UNKNOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Lost Forward",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.LOST_FWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop pulled away ownership before a write was committed",
"UMask": "0x10",
@@ -226,8 +275,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x20",
@@ -235,8 +286,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x40",
@@ -244,8 +297,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SLOW_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x4",
@@ -253,8 +308,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SLOW_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x1",
@@ -262,8 +319,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SLOW_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x8",
@@ -271,8 +330,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SLOW_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x2",
@@ -280,88 +341,110 @@
},
{
"BriefDescription": "P2P Requests",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_P2P_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "P2P requests from the ITC",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Occupancy",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_P2P_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "P2P B & S Queue Occupancy",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; P2P completions",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; match if local only",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; match if local and target matches",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; P2P Message",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; P2P reads",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; Match if remote only",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; match if remote and target matches",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; P2P Writes",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
"UMask": "0x7e",
@@ -369,8 +452,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
"UMask": "0x74",
@@ -378,8 +463,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
"UMask": "0x72",
@@ -387,8 +474,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
"UMask": "0x78",
@@ -396,8 +485,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
"UMask": "0x71",
@@ -405,64 +496,80 @@
},
{
"BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
"UMask": "0x10",
@@ -470,8 +577,10 @@
},
{
"BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
"UMask": "0x20",
@@ -479,8 +588,10 @@
},
{
"BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
"UMask": "0x4",
@@ -488,8 +599,10 @@
},
{
"BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.READS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
"UMask": "0x1",
@@ -497,8 +610,10 @@
},
{
"BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
"UMask": "0x2",
@@ -506,6 +621,7 @@
},
{
"BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -515,118 +631,150 @@
},
{
"BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
"EventCode": "0xB",
"EventName": "UNC_I_TxC_AK_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0xA",
"EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x1A",
"EventName": "UNC_I_TxR2_AD_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x1B",
"EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xD",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xE",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
"Unit": "IRP"
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -634,8 +782,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -643,8 +793,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -652,8 +804,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -661,8 +815,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -670,8 +826,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -679,8 +837,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -688,8 +848,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -697,8 +859,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -706,8 +870,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -715,8 +881,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -724,8 +892,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -733,8 +903,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -742,8 +914,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -751,8 +925,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -760,8 +936,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -769,8 +947,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -778,8 +958,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -787,8 +969,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -796,8 +980,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -805,8 +991,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -814,8 +1002,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -823,8 +1013,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -832,8 +1024,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -841,8 +1035,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -850,8 +1046,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -859,8 +1057,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -868,8 +1068,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -877,8 +1079,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -886,8 +1090,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -895,8 +1101,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -904,8 +1112,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -913,8 +1123,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -922,8 +1134,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -931,8 +1145,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -940,8 +1156,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -949,8 +1167,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -958,8 +1178,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -967,8 +1189,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -976,8 +1200,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -985,8 +1211,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -994,8 +1222,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -1003,8 +1233,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1012,8 +1244,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1021,8 +1255,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1030,8 +1266,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -1039,8 +1277,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -1048,8 +1288,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -1057,6 +1299,7 @@
},
{
"BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
"PerPkg": "1",
@@ -1066,43 +1309,54 @@
},
{
"BriefDescription": "M2M to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M2M_BYPASS_M2M_Egress.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles - at UCLK",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M2M_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
"PerPkg": "1",
@@ -1111,6 +1365,7 @@
},
{
"BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
"PerPkg": "1",
@@ -1119,6 +1374,7 @@
},
{
"BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
"PerPkg": "1",
@@ -1127,6 +1383,7 @@
},
{
"BriefDescription": "Number of reads in which direct to Intel(R) UPI transactions were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
"PerPkg": "1",
@@ -1135,6 +1392,7 @@
},
{
"BriefDescription": "Cycles when direct to Intel(R) UPI was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
"PerPkg": "1",
@@ -1143,6 +1401,7 @@
},
{
"BriefDescription": "Messages sent direct to the Intel(R) UPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
"PerPkg": "1",
@@ -1151,6 +1410,7 @@
},
{
"BriefDescription": "Number of reads that a message sent direct2 Intel(R) UPI was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
"PerPkg": "1",
@@ -1159,70 +1419,87 @@
},
{
"BriefDescription": "Directory Hit; On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -1232,6 +1509,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -1241,6 +1519,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -1250,6 +1529,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -1259,70 +1539,87 @@
},
{
"BriefDescription": "Directory Miss; On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
"PerPkg": "1",
@@ -1332,6 +1629,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
"PerPkg": "1",
@@ -1341,6 +1639,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -1350,6 +1649,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
"PerPkg": "1",
@@ -1359,6 +1659,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
"PerPkg": "1",
@@ -1368,6 +1669,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
"PerPkg": "1",
@@ -1377,6 +1679,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
"PerPkg": "1",
@@ -1386,8 +1689,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -1395,8 +1700,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -1404,8 +1711,10 @@
},
{
"BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_FAST_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x2",
@@ -1413,8 +1722,10 @@
},
{
"BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_FAST_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x1",
@@ -1422,8 +1733,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1431,8 +1744,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1440,8 +1755,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1449,8 +1766,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1458,8 +1777,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1467,8 +1788,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1476,8 +1799,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1485,8 +1810,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1494,8 +1821,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1503,8 +1832,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1512,8 +1843,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1521,8 +1854,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1530,8 +1865,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -1539,8 +1876,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -1548,6 +1887,7 @@
},
{
"BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.ALL",
"PerPkg": "1",
@@ -1557,22 +1897,27 @@
},
{
"BriefDescription": "M2M Reads Issued to iMC; All, regardless of priority.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.FROM_TRANSGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC; Critical Priority",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.NORMAL",
"PerPkg": "1",
@@ -1582,6 +1927,7 @@
},
{
"BriefDescription": "Read requests to Intel(R) Optane(TM) DC persistent memory issued to the iMC from M2M",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.TO_PMM",
"PerPkg": "1",
@@ -1591,6 +1937,7 @@
},
{
"BriefDescription": "Writes to iMC issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -1600,30 +1947,37 @@
},
{
"BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FROM_TRANSGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.NI",
"PerPkg": "1",
@@ -1632,6 +1986,7 @@
},
{
"BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -1641,14 +1996,17 @@
},
{
"BriefDescription": "M2M Writes Issued to iMC; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write requests to Intel(R) Optane(TM) DC persistent memory issued to the iMC from M2M",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
"PerPkg": "1",
@@ -1658,84 +2016,105 @@
},
{
"BriefDescription": "Number Packet Header Matches; MC Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M2M_PKT_MATCH.MC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Number Packet Header Matches; Mesh Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch requests that got turn into a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
"PerPkg": "1",
@@ -1744,6 +2123,7 @@
},
{
"BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M2M_PREFCAM_INSERTS",
"PerPkg": "1",
@@ -1752,15 +2132,19 @@
},
{
"BriefDescription": "Prefetch CAM Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -1768,8 +2152,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -1777,8 +2163,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -1786,8 +2174,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -1795,8 +2185,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -1804,8 +2196,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -1813,8 +2207,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -1822,8 +2218,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -1831,174 +2229,217 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M2M_RxC_AD_INSERTS",
"PerPkg": "1",
@@ -2007,6 +2448,7 @@
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
"PerPkg": "1",
@@ -2014,20 +2456,25 @@
},
{
"BriefDescription": "BL Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M2M_RxC_BL_INSERTS",
"PerPkg": "1",
@@ -2035,6 +2482,7 @@
},
{
"BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
"PerPkg": "1",
@@ -2042,8 +2490,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -2051,8 +2501,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -2060,8 +2512,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -2069,8 +2523,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -2078,8 +2534,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -2087,8 +2545,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -2096,8 +2556,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -2105,8 +2567,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -2114,8 +2578,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -2123,8 +2589,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -2132,8 +2600,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -2141,8 +2611,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -2150,8 +2622,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -2159,8 +2633,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -2168,8 +2644,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -2177,8 +2655,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -2186,8 +2666,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -2195,8 +2677,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -2204,8 +2688,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -2213,8 +2699,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -2222,8 +2710,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -2231,8 +2721,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -2240,8 +2732,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -2249,8 +2743,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -2258,8 +2754,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -2267,8 +2765,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -2276,8 +2776,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -2285,8 +2787,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -2294,8 +2798,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -2303,8 +2809,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -2312,8 +2820,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -2321,8 +2831,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -2330,8 +2842,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -2339,8 +2853,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -2348,8 +2864,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -2357,8 +2875,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -2366,8 +2886,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -2375,8 +2897,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -2384,8 +2908,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -2393,8 +2919,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -2402,8 +2930,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -2411,8 +2941,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -2420,8 +2952,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -2429,8 +2963,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -2438,8 +2974,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -2447,8 +2985,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -2456,8 +2996,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -2465,8 +3007,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -2474,8 +3018,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -2483,8 +3029,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -2492,8 +3040,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -2501,8 +3051,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -2510,8 +3062,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -2519,8 +3073,10 @@
},
{
"BriefDescription": "Clean line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode and regular reads to DRAM in 1LM",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tag Hit; Read Hit from NearMem, Clean Line",
"UMask": "0x1",
@@ -2528,6 +3084,7 @@
},
{
"BriefDescription": "Dirty line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
"PerPkg": "1",
@@ -2537,6 +3094,7 @@
},
{
"BriefDescription": "Clean line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
"PerPkg": "1",
@@ -2546,6 +3104,7 @@
},
{
"BriefDescription": "Dirty line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
"PerPkg": "1",
@@ -2555,151 +3114,190 @@
},
{
"BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Pending Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2M_TRACKER_PENDING_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M2M_TxC_AD_INSERTS",
"PerPkg": "1",
@@ -2707,20 +3305,25 @@
},
{
"BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
"PerPkg": "1",
@@ -2728,430 +3331,537 @@
},
{
"BriefDescription": "Outbound Ring Transactions on AK; CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound Ring Transactions on AK; NDR Transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_M2M_TxC_AK.NDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x88",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Prefetch Read Cam Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Sideband",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_TxC_AK_SIDEBAND.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Sideband",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_TxC_AK_SIDEBAND.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
"PerPkg": "1",
@@ -3160,54 +3870,67 @@
},
{
"BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -3216,24 +3939,30 @@
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -3241,8 +3970,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -3250,8 +3981,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -3259,8 +3992,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -3268,8 +4003,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -3277,8 +4014,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -3286,8 +4025,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -3295,8 +4036,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -3304,8 +4047,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -3313,8 +4058,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -3322,8 +4069,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -3331,8 +4080,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -3340,8 +4091,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -3349,8 +4102,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -3358,8 +4113,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -3367,8 +4124,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -3376,8 +4135,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -3385,8 +4146,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -3394,8 +4157,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -3403,8 +4168,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -3412,8 +4179,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -3421,8 +4190,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -3430,8 +4201,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -3439,8 +4212,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -3448,8 +4223,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -3457,8 +4234,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -3466,8 +4245,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -3475,8 +4256,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -3484,8 +4267,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -3493,8 +4278,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -3502,8 +4289,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x20",
@@ -3511,8 +4300,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -3520,8 +4311,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -3529,8 +4322,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -3538,8 +4333,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -3547,8 +4344,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -3556,8 +4355,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -3565,8 +4366,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -3574,8 +4377,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -3583,8 +4388,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -3592,8 +4399,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -3601,8 +4410,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -3610,8 +4421,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -3619,8 +4432,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -3628,8 +4443,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -3637,8 +4454,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -3646,8 +4465,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -3655,8 +4476,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -3664,8 +4487,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -3673,8 +4498,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -3682,8 +4509,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -3691,8 +4520,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -3700,8 +4531,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -3709,8 +4542,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -3718,8 +4553,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -3727,8 +4564,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -3736,8 +4575,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -3745,8 +4586,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -3754,8 +4597,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -3763,8 +4608,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -3772,8 +4619,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -3781,8 +4630,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -3790,8 +4641,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -3799,8 +4652,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -3808,8 +4663,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -3817,8 +4674,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -3826,8 +4685,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -3835,8 +4696,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -3844,8 +4707,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -3853,8 +4718,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -3862,8 +4729,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -3871,8 +4740,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -3880,8 +4751,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -3889,8 +4762,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -3898,8 +4773,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -3907,8 +4784,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -3916,8 +4795,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -3925,8 +4806,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -3934,8 +4817,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -3943,8 +4828,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -3952,8 +4839,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -3961,8 +4850,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -3970,8 +4861,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -3979,8 +4872,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -3988,8 +4883,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -3997,8 +4894,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -4006,8 +4905,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4015,8 +4916,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -4024,8 +4927,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4033,8 +4938,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -4042,8 +4949,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -4051,8 +4960,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -4060,8 +4971,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -4069,8 +4982,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -4078,8 +4993,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -4087,8 +5004,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -4096,8 +5015,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -4105,8 +5026,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -4114,8 +5037,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -4123,8 +5048,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -4132,8 +5059,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4141,8 +5070,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4150,8 +5081,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4159,8 +5092,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4168,8 +5103,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4177,8 +5114,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4186,8 +5125,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4195,8 +5136,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4204,8 +5147,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4213,8 +5158,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4222,8 +5169,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4231,8 +5180,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4240,8 +5191,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -4249,8 +5202,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -4258,179 +5213,223 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4438,8 +5437,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4447,8 +5448,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4456,8 +5459,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4465,8 +5470,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4474,8 +5481,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4483,8 +5492,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4492,8 +5503,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4501,8 +5514,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4510,8 +5525,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4519,8 +5536,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4528,8 +5547,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4537,8 +5558,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4546,8 +5569,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4555,8 +5580,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4564,8 +5591,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4573,8 +5602,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4582,8 +5613,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4591,8 +5624,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4600,8 +5635,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4609,8 +5646,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4618,8 +5657,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4627,8 +5668,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4636,8 +5679,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4645,8 +5690,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4654,8 +5701,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4663,8 +5712,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4672,8 +5723,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4681,8 +5734,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4690,8 +5745,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4699,8 +5756,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4708,8 +5767,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4717,8 +5778,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4726,8 +5789,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4735,8 +5800,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4744,8 +5811,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4753,8 +5822,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4762,8 +5833,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4771,8 +5844,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4780,8 +5855,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4789,8 +5866,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4798,8 +5877,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4807,8 +5888,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4816,8 +5899,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4825,8 +5910,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4834,8 +5921,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4843,8 +5932,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4852,8 +5943,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4861,8 +5954,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty; Requests",
+ "Counter": "0,1,2",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x4",
@@ -4870,8 +5965,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty; Snoops",
+ "Counter": "0,1,2",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x8",
@@ -4879,8 +5976,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty; VNA Messages",
+ "Counter": "0,1,2",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x1",
@@ -4888,8 +5987,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty; Writebacks",
+ "Counter": "0,1,2",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x2",
@@ -4897,39 +5998,49 @@
},
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
"EventCode": "0x1",
"EventName": "UNC_M3UPI_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of uclks in the M3 uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
"Unit": "M3UPI"
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2",
"EventCode": "0xC0",
"EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2C Sent",
+ "Counter": "0,1,2",
"EventCode": "0x2B",
"EventName": "UNC_M3UPI_D2C_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases BL sends direct to core",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2U Sent",
+ "Counter": "0,1,2",
"EventCode": "0x2A",
"EventName": "UNC_M3UPI_D2U_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cases where SMI3 sends D2U command",
"Unit": "M3UPI"
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2",
"EventCode": "0xAE",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -4937,8 +6048,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2",
"EventCode": "0xAE",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -4946,8 +6059,10 @@
},
{
"BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_FAST_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x2",
@@ -4955,8 +6070,10 @@
},
{
"BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_FAST_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x1",
@@ -4964,8 +6081,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4973,8 +6092,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4982,8 +6103,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4991,8 +6114,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5000,8 +6125,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA9",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5009,8 +6136,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA9",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5018,8 +6147,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA9",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5027,8 +6158,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA9",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5036,8 +6169,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5045,8 +6180,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5054,8 +6191,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5063,8 +6202,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5072,8 +6213,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -5081,8 +6224,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -5090,8 +6235,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO0_IIO1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x1",
@@ -5099,8 +6246,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO2",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x2",
@@ -5108,8 +6257,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO3",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x4",
@@ -5117,8 +6268,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO4",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x8",
@@ -5126,8 +6279,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO5",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x10",
@@ -5135,8 +6290,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; All IIO targets for NCS are in single mask. ORs them together",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x20",
@@ -5144,8 +6301,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; Selected M2p BL NCS credits",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x40",
@@ -5153,8 +6312,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AD - Slot 0",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x1",
@@ -5162,8 +6323,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AD - Slot 1",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x2",
@@ -5171,8 +6334,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AD - Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x4",
@@ -5180,8 +6345,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AK - Slot 0",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x10",
@@ -5189,8 +6356,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AK - Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x20",
@@ -5198,8 +6367,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; BL - Slot 0",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x8",
@@ -5207,8 +6378,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -5216,8 +6389,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -5225,8 +6400,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -5234,8 +6411,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -5243,8 +6422,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -5252,8 +6433,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -5261,8 +6444,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -5270,8 +6455,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -5279,87 +6466,109 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "Counter": "0,1,2",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Lost Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5367,8 +6576,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5376,8 +6587,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5385,8 +6598,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5394,8 +6609,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5403,8 +6620,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5412,8 +6631,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5421,8 +6642,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5430,8 +6653,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5439,8 +6664,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5448,8 +6675,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5457,8 +6686,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5466,8 +6697,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5475,8 +6708,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5484,8 +6719,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; AD, BL Parallel Win",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD and BL messages won arbitration concurrently / in parallel",
"UMask": "0x40",
@@ -5493,8 +6730,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN0",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
"UMask": "0x4",
@@ -5502,8 +6741,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN1",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
"UMask": "0x8",
@@ -5511,8 +6752,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN0",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
"UMask": "0x10",
@@ -5520,8 +6763,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN1",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
"UMask": "0x20",
@@ -5529,8 +6774,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; Parallel Bias to VN0",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0/VN1 arbiter gave second, consecutive win to vn0, delaying vn1 win, because vn0 offered parallel ad/bl",
"UMask": "0x1",
@@ -5538,8 +6785,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; Parallel Bias to VN1",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0/VN1 arbiter gave second, consecutive win to vn1, delaying vn0 win, because vn1 offered parallel ad/bl",
"UMask": "0x2",
@@ -5547,8 +6796,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5556,8 +6807,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5565,8 +6818,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5574,8 +6829,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5583,8 +6840,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5592,8 +6851,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5601,8 +6862,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5610,8 +6873,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5619,8 +6884,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5628,8 +6895,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5637,8 +6906,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5646,8 +6917,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5655,8 +6928,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5664,8 +6939,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5673,8 +6950,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5682,8 +6961,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5691,8 +6972,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5700,8 +6983,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5709,8 +6994,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5718,8 +7005,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5727,8 +7016,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5736,8 +7027,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5745,8 +7038,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5754,8 +7049,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5763,8 +7060,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5772,8 +7071,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5781,8 +7082,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5790,8 +7093,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5799,8 +7104,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on BL Arb",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
"UMask": "0x2",
@@ -5808,8 +7115,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on Idle",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to slot 0 of independent flit while pipeline is idle",
"UMask": "0x1",
@@ -5817,8 +7126,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 1",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to flit slot 1 while merging with bl message in same flit",
"UMask": "0x4",
@@ -5826,8 +7137,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to flit slot 2 while merging with bl message in same flit",
"UMask": "0x8",
@@ -5835,8 +7148,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5844,8 +7159,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5853,8 +7170,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5862,8 +7181,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5871,8 +7192,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5880,8 +7203,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5889,8 +7214,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5898,8 +7225,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5907,8 +7236,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5916,8 +7247,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5925,8 +7258,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5934,8 +7269,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5943,8 +7280,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5952,8 +7291,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5961,8 +7302,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events; Any In BGF FIFO",
+ "Counter": "0,1,2",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Indication that at least one packet (flit) is in the bgf (fifo only)",
"UMask": "0x1",
@@ -5970,8 +7313,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events; Any in BGF Path",
+ "Counter": "0,1,2",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
"UMask": "0x2",
@@ -5979,8 +7324,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events; No D2K For Arb",
+ "Counter": "0,1,2",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.NO_D2K_FOR_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 or VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
"UMask": "0x4",
@@ -5988,8 +7335,10 @@
},
{
"BriefDescription": "Credit Occupancy; D2K Credits",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x10",
@@ -5997,8 +7346,10 @@
},
{
"BriefDescription": "Credit Occupancy; Packets in BGF FIFO",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
"UMask": "0x2",
@@ -6006,8 +7357,10 @@
},
{
"BriefDescription": "Credit Occupancy; Packets in BGF Path",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
"UMask": "0x4",
@@ -6015,8 +7368,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "count of bl messages in pump-1-pending state, in completion fifo only",
"UMask": "0x40",
@@ -6024,8 +7379,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "count of bl messages in pump-1-pending state, in marker table and in fifo",
"UMask": "0x20",
@@ -6033,8 +7390,10 @@
},
{
"BriefDescription": "Credit Occupancy; Transmit Credits",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x8",
@@ -6042,8 +7401,10 @@
},
{
"BriefDescription": "Credit Occupancy; VNA In Use",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
"UMask": "0x1",
@@ -6051,8 +7412,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6060,8 +7423,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6069,8 +7434,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6078,8 +7445,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6087,8 +7456,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6096,8 +7467,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6105,8 +7478,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6114,8 +7489,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6123,8 +7500,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6132,8 +7511,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6141,8 +7522,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6150,8 +7533,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6159,8 +7544,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6168,8 +7555,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6177,8 +7566,10 @@
},
{
"BriefDescription": "Data Flit Not Sent; All",
+ "Counter": "0,1,2",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data flit is ready for transmission but could not be sent",
"UMask": "0x1",
@@ -6186,8 +7577,10 @@
},
{
"BriefDescription": "Data Flit Not Sent; No BGF Credits",
+ "Counter": "0,1,2",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_BGF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data flit is ready for transmission but could not be sent",
"UMask": "0x2",
@@ -6195,8 +7588,10 @@
},
{
"BriefDescription": "Data Flit Not Sent; No TxQ Credits",
+ "Counter": "0,1,2",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data flit is ready for transmission but could not be sent",
"UMask": "0x4",
@@ -6204,8 +7599,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 0",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "generating bl data flit sequence; waiting for data pump 0",
"UMask": "0x1",
@@ -6213,8 +7610,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
"UMask": "0x10",
@@ -6222,8 +7621,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "pump-1-pending logic is tracking at least one message",
"UMask": "0x8",
@@ -6231,8 +7632,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "pump-1-pending completion fifo is full",
"UMask": "0x40",
@@ -6240,8 +7643,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
"UMask": "0x20",
@@ -6249,8 +7654,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "a bl message finished but is in limbo and moved to pump-1-pending logic",
"UMask": "0x4",
@@ -6258,8 +7665,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 1",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "generating bl data flit sequence; waiting for data pump 1",
"UMask": "0x2",
@@ -6267,15 +7676,19 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC",
+ "Counter": "0,1,2",
"EventCode": "0x5A",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit; One Message",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "One message in flit; VNA or non-VNA flit",
"UMask": "0x1",
@@ -6283,8 +7696,10 @@
},
{
"BriefDescription": "Sent Header Flit; One Message in non-VNA",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG_VNX",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "One message in flit; non-VNA flit",
"UMask": "0x8",
@@ -6292,8 +7707,10 @@
},
{
"BriefDescription": "Sent Header Flit; Two Messages",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.2_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Two messages in flit; VNA flit",
"UMask": "0x2",
@@ -6301,8 +7718,10 @@
},
{
"BriefDescription": "Sent Header Flit; Three Messages",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.3_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Three messages in flit; VNA flit",
"UMask": "0x4",
@@ -6310,40 +7729,50 @@
},
{
"BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; All",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Needs Data Flit",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL message requires data flit sequence",
"UMask": "0x2",
@@ -6351,8 +7780,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 0",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Waiting for header pump 0",
"UMask": "0x4",
@@ -6360,8 +7791,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header pump 1 is not required for flit",
"UMask": "0x10",
@@ -6369,8 +7802,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Bubble",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header pump 1 is not required for flit but flit transmission delayed",
"UMask": "0x20",
@@ -6378,8 +7813,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Not Avail",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header pump 1 is not required for flit and not available",
"UMask": "0x40",
@@ -6387,8 +7824,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 1",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Waiting for header pump 1",
"UMask": "0x8",
@@ -6396,8 +7835,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Accumulate",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
"UMask": "0x1",
@@ -6405,8 +7846,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Accumulate Ready",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
"UMask": "0x2",
@@ -6414,8 +7857,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Accumulate Wasted",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
"UMask": "0x4",
@@ -6423,8 +7868,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Run-Ahead - Blocked",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
"UMask": "0x8",
@@ -6432,8 +7879,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Run-Ahead - Message",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting is in run-ahead to start new flit, and message is actually slotted into new flit",
"UMask": "0x10",
@@ -6441,8 +7890,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Parallel Ok",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; New header flit construction may proceed in parallel with data flit sequence",
"UMask": "0x20",
@@ -6450,8 +7901,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Parallel Flit Finished",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit finished assembly in parallel with data flit sequence",
"UMask": "0x80",
@@ -6459,8 +7912,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Parallel Message",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Message is slotted into header flit in parallel with data flit sequence",
"UMask": "0x40",
@@ -6468,8 +7923,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2; Rate-matching Stall",
+ "Counter": "0,1,2",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 2; Rate-matching stall injected",
"UMask": "0x1",
@@ -6477,8 +7934,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2; Rate-matching Stall - No Message",
+ "Counter": "0,1,2",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 2; Rate matching stall injected, but no additional message slotted during stall cycle",
"UMask": "0x2",
@@ -6486,8 +7945,10 @@
},
{
"BriefDescription": "Header Not Sent; All",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent",
"UMask": "0x1",
@@ -6495,8 +7956,10 @@
},
{
"BriefDescription": "Header Not Sent; No BGF Credits",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; No BGF credits available",
"UMask": "0x2",
@@ -6504,8 +7967,10 @@
},
{
"BriefDescription": "Header Not Sent; No BGF Credits + No Extra Message Slotted",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; No BGF credits available; no additional message slotted into flit",
"UMask": "0x8",
@@ -6513,8 +7978,10 @@
},
{
"BriefDescription": "Header Not Sent; No TxQ Credits",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; No TxQ credits available",
"UMask": "0x4",
@@ -6522,8 +7989,10 @@
},
{
"BriefDescription": "Header Not Sent; No TxQ Credits + No Extra Message Slotted",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; No TxQ credits available; no additional message slotted into flit",
"UMask": "0x10",
@@ -6531,8 +8000,10 @@
},
{
"BriefDescription": "Header Not Sent; Sent - One Slot Taken",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ONE_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with only one slot taken (two slots free)",
"UMask": "0x20",
@@ -6540,8 +8011,10 @@
},
{
"BriefDescription": "Header Not Sent; Sent - Three Slots Taken",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.THREE_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with three slots taken (no slots free)",
"UMask": "0x80",
@@ -6549,8 +8022,10 @@
},
{
"BriefDescription": "Header Not Sent; Sent - Two Slots Taken",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.TWO_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with only two slots taken (one slots free)",
"UMask": "0x40",
@@ -6558,8 +8033,10 @@
},
{
"BriefDescription": "Message Held; Can't Slot AD",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x40",
@@ -6567,8 +8044,10 @@
},
{
"BriefDescription": "Message Held; Can't Slot BL",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x80",
@@ -6576,8 +8055,10 @@
},
{
"BriefDescription": "Message Held; Parallel AD Lost",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_AD_LOST",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "some AD message lost contest for slot 0 (logical OR of all AD events under INGR_SLOT_LOST_MC_VN{0,1})",
"UMask": "0x10",
@@ -6585,8 +8066,10 @@
},
{
"BriefDescription": "Message Held; Parallel Attempt",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ad and bl messages attempted to slot into the same flit in parallel",
"UMask": "0x4",
@@ -6594,8 +8077,10 @@
},
{
"BriefDescription": "Message Held; Parallel BL Lost",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_BL_LOST",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "some BL message lost contest for slot 0 (logical OR of all BL events under INGR_SLOT_LOST_MC_VN{0,1})",
"UMask": "0x20",
@@ -6603,8 +8088,10 @@
},
{
"BriefDescription": "Message Held; Parallel Success",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
@@ -6612,8 +8099,10 @@
},
{
"BriefDescription": "Message Held; VN0",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
"UMask": "0x1",
@@ -6621,8 +8110,10 @@
},
{
"BriefDescription": "Message Held; VN1",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
"UMask": "0x2",
@@ -6630,8 +8121,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6639,8 +8132,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6648,8 +8143,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6657,8 +8154,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6666,8 +8165,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6675,8 +8176,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6684,8 +8187,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6693,8 +8198,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6702,8 +8209,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6711,8 +8220,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6720,8 +8231,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6729,8 +8242,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6738,8 +8253,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6747,8 +8264,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6756,8 +8275,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6765,8 +8286,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6774,8 +8297,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6783,8 +8308,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6792,8 +8319,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6801,8 +8330,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6810,8 +8341,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6819,8 +8352,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6828,8 +8363,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6837,8 +8374,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6846,8 +8385,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6855,8 +8396,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6864,8 +8407,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6873,8 +8418,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6882,8 +8429,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6891,8 +8440,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6900,8 +8451,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6909,8 +8462,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6918,8 +8473,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6927,8 +8484,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6936,8 +8495,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6945,8 +8506,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6954,8 +8517,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6963,8 +8528,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6972,8 +8539,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6981,8 +8550,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6990,8 +8561,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6999,8 +8572,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -7008,32 +8583,40 @@
},
{
"BriefDescription": "SMI3 Prefetch Messages; Lost Arbitration",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARB_LOST",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "SMI3 Prefetch Messages; Arrived",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARRIVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "SMI3 Prefetch Messages; Dropped - Old",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_OLD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "SMI3 Prefetch Messages; Dropped - Wrap",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_WRAP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Dropped because it was overwritten by new message while prefetch queue was full",
"UMask": "0x10",
@@ -7041,16 +8624,20 @@
},
{
"BriefDescription": "SMI3 Prefetch Messages; Slotted",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.SLOTTED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "Remote VNA Credits; Any In Use",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "At least one remote vna credit is in use",
"UMask": "0x20",
@@ -7058,8 +8645,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Corrected",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of remote vna credits corrected (local return) per cycle",
"UMask": "0x2",
@@ -7067,8 +8656,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Level < 1",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote vna credit level is less than 1 (i.e. no vna credits available)",
"UMask": "0x4",
@@ -7076,8 +8667,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Level < 4",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
"UMask": "0x8",
@@ -7085,8 +8678,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Level < 5",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
"UMask": "0x10",
@@ -7094,8 +8689,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Used",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.USED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of remote vna credits consumed per cycle",
"UMask": "0x1",
@@ -7103,8 +8700,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -7112,8 +8711,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -7121,8 +8722,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -7130,8 +8733,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -7139,8 +8744,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -7148,8 +8755,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -7157,8 +8766,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -7166,8 +8777,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -7175,8 +8788,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -7184,8 +8799,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -7193,8 +8810,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -7202,8 +8821,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -7211,8 +8832,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -7220,8 +8843,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -7229,8 +8854,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -7238,8 +8865,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -7247,8 +8876,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -7256,8 +8887,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -7265,8 +8898,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -7274,8 +8909,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -7283,8 +8920,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -7292,8 +8931,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -7301,8 +8942,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -7310,8 +8953,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -7319,8 +8964,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -7328,8 +8975,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -7337,8 +8986,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -7346,8 +8997,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -7355,8 +9008,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -7364,8 +9019,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7373,8 +9030,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7382,8 +9041,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7391,8 +9052,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7400,8 +9063,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7409,8 +9074,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7418,8 +9085,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7427,8 +9096,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7436,8 +9107,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7445,8 +9118,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7454,8 +9129,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7463,8 +9140,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7472,8 +9151,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7481,8 +9162,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7490,8 +9173,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7499,8 +9184,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7508,8 +9195,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7517,8 +9206,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7526,8 +9217,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7535,8 +9228,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7544,8 +9239,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7553,8 +9250,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7562,8 +9261,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7571,8 +9272,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7580,8 +9283,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -7589,8 +9294,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -7598,8 +9305,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -7607,8 +9316,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -7616,8 +9327,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -7625,8 +9338,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -7634,8 +9349,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -7643,8 +9360,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -7652,8 +9371,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x1",
@@ -7661,8 +9382,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x2",
@@ -7670,8 +9393,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x4",
@@ -7679,8 +9404,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x8",
@@ -7688,8 +9415,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x1",
@@ -7697,8 +9426,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x4",
@@ -7706,8 +9437,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x2",
@@ -7715,8 +9448,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x8",
@@ -7724,8 +9459,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x10",
@@ -7733,8 +9470,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x40",
@@ -7742,8 +9481,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x20",
@@ -7751,8 +9492,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x80",
@@ -7760,8 +9503,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -7769,8 +9514,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -7778,8 +9525,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -7787,8 +9536,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -7796,8 +9547,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -7805,8 +9558,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -7814,8 +9569,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -7823,64 +9580,80 @@
},
{
"BriefDescription": "AD Flow Q Occupancy; VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Number of Snoop Targets; CHA on VN0",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_CHA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to CHA",
"UMask": "0x4",
@@ -7888,8 +9661,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN0",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_NON_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of non-idle cycles in issuing Vn0 Snpf",
"UMask": "0x40",
@@ -7897,8 +9672,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN0",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to peer UPI0",
"UMask": "0x1",
@@ -7906,8 +9683,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN0",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to peer UPI1",
"UMask": "0x2",
@@ -7915,8 +9694,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; CHA on VN1",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_CHA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to CHA",
"UMask": "0x20",
@@ -7924,8 +9705,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN1",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_NON_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of non-idle cycles in issuing Vn1 Snpf",
"UMask": "0x80",
@@ -7933,8 +9716,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN1",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to peer UPI0",
"UMask": "0x8",
@@ -7942,8 +9727,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN1",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to peer UPI1",
"UMask": "0x10",
@@ -7951,8 +9738,10 @@
},
{
"BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "Counter": "0,1,2",
"EventCode": "0x3D",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_NONSNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outcome of SnpF pending arbitration; FlowQ txn issued when SnpF pending on Vn0",
"UMask": "0x1",
@@ -7960,8 +9749,10 @@
},
{
"BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "Counter": "0,1,2",
"EventCode": "0x3D",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_VN2SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outcome of SnpF pending arbitration; FlowQ Vn0 SnpF issued when SnpF pending on Vn1",
"UMask": "0x4",
@@ -7969,8 +9760,10 @@
},
{
"BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "Counter": "0,1,2",
"EventCode": "0x3D",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_NONSNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outcome of SnpF pending arbitration; FlowQ txn issued when SnpF pending on Vn1",
"UMask": "0x2",
@@ -7978,8 +9771,10 @@
},
{
"BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "Counter": "0,1,2",
"EventCode": "0x3D",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_VN0SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outcome of SnpF pending arbitration; FlowQ Vn1 SnpF issued when SnpF pending on Vn0",
"UMask": "0x8",
@@ -7987,8 +9782,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x1",
@@ -7996,8 +9793,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x2",
@@ -8005,8 +9804,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x8",
@@ -8014,8 +9815,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x10",
@@ -8023,8 +9826,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x20",
@@ -8032,8 +9837,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x80",
@@ -8041,8 +9848,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x1",
@@ -8050,8 +9859,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x2",
@@ -8059,8 +9870,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x8",
@@ -8068,8 +9881,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x10",
@@ -8077,8 +9892,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x20",
@@ -8086,8 +9903,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x80",
@@ -8095,8 +9914,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x1",
@@ -8104,8 +9925,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x4",
@@ -8113,8 +9936,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x2",
@@ -8122,8 +9947,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x8",
@@ -8131,8 +9958,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x10",
@@ -8140,8 +9969,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x40",
@@ -8149,8 +9980,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x20",
@@ -8158,8 +9991,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x80",
@@ -8167,22 +10002,28 @@
},
{
"BriefDescription": "AK Flow Q Inserts",
+ "Counter": "0,1,2",
"EventCode": "0x2F",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AK Flow Q Occupancy",
+ "Counter": "0",
"EventCode": "0x1E",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Failed ARB for BL; VN0 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -8190,8 +10031,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN0 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -8199,8 +10042,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -8208,8 +10053,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -8217,8 +10064,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN1 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -8226,8 +10075,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN1 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -8235,8 +10086,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -8244,8 +10097,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -8253,8 +10108,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x1",
@@ -8262,8 +10119,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x4",
@@ -8271,8 +10130,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x2",
@@ -8280,8 +10141,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x8",
@@ -8289,8 +10152,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x10",
@@ -8298,8 +10163,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x40",
@@ -8307,8 +10174,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x20",
@@ -8316,8 +10185,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x80",
@@ -8325,8 +10196,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -8334,8 +10207,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -8343,8 +10218,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN0 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -8352,8 +10229,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN0 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -8361,8 +10240,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -8370,8 +10251,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -8379,8 +10262,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN1_NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x80",
@@ -8388,8 +10273,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN1_NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -8397,72 +10284,90 @@
},
{
"BriefDescription": "BL Flow Q Occupancy; VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN0 NCS Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN1_NCS Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN1_NCB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x2",
@@ -8470,8 +10375,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN0 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x8",
@@ -8479,8 +10386,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x1",
@@ -8488,8 +10397,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x20",
@@ -8497,8 +10408,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN1 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x80",
@@ -8506,8 +10419,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x10",
@@ -8515,8 +10430,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x4",
@@ -8524,8 +10441,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x8",
@@ -8533,8 +10452,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x1",
@@ -8542,8 +10463,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x2",
@@ -8551,8 +10474,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x40",
@@ -8560,8 +10485,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x80",
@@ -8569,8 +10496,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x10",
@@ -8578,8 +10507,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x20",
@@ -8587,8 +10518,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8596,8 +10529,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8605,8 +10540,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8614,8 +10551,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8623,8 +10562,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8632,8 +10573,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8641,8 +10584,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8650,8 +10595,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8659,8 +10606,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8668,8 +10617,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8677,8 +10628,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -8686,8 +10639,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8695,8 +10650,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8704,8 +10661,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8713,8 +10672,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8722,8 +10683,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8731,8 +10694,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8740,8 +10705,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8749,8 +10716,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8758,8 +10727,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8767,8 +10738,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8776,8 +10749,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8785,8 +10760,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8794,8 +10771,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8803,8 +10782,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8812,8 +10793,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8821,8 +10804,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8830,8 +10815,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8839,8 +10826,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8848,8 +10837,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -8857,8 +10848,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x20",
@@ -8866,8 +10859,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -8875,8 +10870,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -8884,8 +10881,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -8893,8 +10892,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -8902,8 +10903,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8911,8 +10914,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8920,8 +10925,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8929,8 +10936,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8938,8 +10947,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8947,8 +10958,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8956,8 +10969,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -8965,8 +10980,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -8974,8 +10991,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -8983,8 +11002,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -8992,8 +11013,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -9001,8 +11024,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -9010,8 +11035,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -9019,8 +11046,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -9028,8 +11057,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -9037,8 +11068,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -9046,8 +11079,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -9055,8 +11090,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -9064,8 +11101,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -9073,8 +11112,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -9082,8 +11123,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -9091,8 +11134,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -9100,8 +11145,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -9109,8 +11156,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9118,8 +11167,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9127,8 +11178,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9136,8 +11189,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9145,8 +11200,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9154,8 +11211,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9163,8 +11222,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9172,8 +11233,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9181,8 +11244,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9190,8 +11255,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9199,8 +11266,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9208,8 +11277,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9217,8 +11288,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9226,8 +11299,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9235,8 +11310,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9244,8 +11321,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9253,8 +11332,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9262,8 +11343,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9271,8 +11354,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9280,8 +11365,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9289,8 +11376,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9298,8 +11387,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -9307,8 +11398,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -9316,8 +11409,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -9325,8 +11420,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -9334,8 +11431,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -9343,8 +11442,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -9352,8 +11453,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -9361,8 +11464,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9370,8 +11475,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9379,8 +11486,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9388,8 +11497,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9397,8 +11508,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9406,8 +11519,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9415,8 +11530,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9424,8 +11541,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -9433,8 +11552,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -9442,8 +11563,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -9451,8 +11574,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -9460,8 +11585,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -9469,8 +11596,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -9478,8 +11607,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -9487,8 +11618,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x2",
@@ -9496,8 +11629,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x8",
@@ -9505,8 +11640,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x4",
@@ -9514,8 +11651,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x10",
@@ -9523,8 +11662,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x40",
@@ -9532,8 +11673,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x20",
@@ -9541,8 +11684,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VNA",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x1",
@@ -9550,8 +11695,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x4",
@@ -9559,8 +11706,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x2",
@@ -9568,8 +11717,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x8",
@@ -9577,8 +11728,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x20",
@@ -9586,8 +11739,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x10",
@@ -9595,8 +11750,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x40",
@@ -9604,8 +11761,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VNA",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x1",
@@ -9613,6 +11772,7 @@
},
{
"BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
+ "Counter": "0,1,2",
"EventCode": "0x29",
"EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
"PerPkg": "1",
@@ -9621,8 +11781,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9630,8 +11792,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9639,8 +11803,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9648,8 +11814,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9657,8 +11825,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA8",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9666,8 +11836,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA8",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9675,8 +11847,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA8",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9684,8 +11858,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA8",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9693,8 +11869,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9702,8 +11880,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9711,8 +11891,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9720,8 +11902,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9729,8 +11913,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2",
"EventCode": "0xAC",
"EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -9738,8 +11924,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2",
"EventCode": "0xAC",
"EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -9747,8 +11935,10 @@
},
{
"BriefDescription": "VN0 Credit Used; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9756,8 +11946,10 @@
},
{
"BriefDescription": "VN0 Credit Used; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9765,8 +11957,10 @@
},
{
"BriefDescription": "VN0 Credit Used; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9774,8 +11968,10 @@
},
{
"BriefDescription": "VN0 Credit Used; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9783,8 +11979,10 @@
},
{
"BriefDescription": "VN0 Credit Used; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9792,8 +11990,10 @@
},
{
"BriefDescription": "VN0 Credit Used; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9801,8 +12001,10 @@
},
{
"BriefDescription": "VN0 No Credits; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9810,8 +12012,10 @@
},
{
"BriefDescription": "VN0 No Credits; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9819,8 +12023,10 @@
},
{
"BriefDescription": "VN0 No Credits; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9828,8 +12034,10 @@
},
{
"BriefDescription": "VN0 No Credits; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9837,8 +12045,10 @@
},
{
"BriefDescription": "VN0 No Credits; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9846,8 +12056,10 @@
},
{
"BriefDescription": "VN0 No Credits; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9855,8 +12067,10 @@
},
{
"BriefDescription": "VN1 Credit Used; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9864,8 +12078,10 @@
},
{
"BriefDescription": "VN1 Credit Used; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9873,8 +12089,10 @@
},
{
"BriefDescription": "VN1 Credit Used; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9882,8 +12100,10 @@
},
{
"BriefDescription": "VN1 Credit Used; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9891,8 +12111,10 @@
},
{
"BriefDescription": "VN1 Credit Used; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9900,8 +12122,10 @@
},
{
"BriefDescription": "VN1 Credit Used; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9909,8 +12133,10 @@
},
{
"BriefDescription": "VN1 No Credits; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9918,8 +12144,10 @@
},
{
"BriefDescription": "VN1 No Credits; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9927,8 +12155,10 @@
},
{
"BriefDescription": "VN1 No Credits; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9936,8 +12166,10 @@
},
{
"BriefDescription": "VN1 No Credits; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9945,8 +12177,10 @@
},
{
"BriefDescription": "VN1 No Credits; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9954,8 +12188,10 @@
},
{
"BriefDescription": "VN1 No Credits; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9963,15 +12199,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_TxC_BL.DRS_UPI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x40",
"EventName": "UNC_NoUnit_TxC_BL.DRS_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Clocks of the Intel(R) Ultra Path Interconnect (UPI)",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -9980,6 +12219,7 @@
},
{
"BriefDescription": "Data Response packets that go direct to core",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
"PerPkg": "1",
@@ -9989,6 +12229,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
@@ -9998,6 +12239,7 @@
},
{
"BriefDescription": "Data Response packets that go direct to Intel(R) UPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
"PerPkg": "1",
@@ -10007,70 +12249,87 @@
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles Intel(R) UPI is in L1 power mode (shutdown)",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_UPI_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -10079,164 +12338,205 @@
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req Nack",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_UPI_POWER_L1_NACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req (same as L1 Ack).",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_UPI_POWER_L1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles the Rx of the Intel(R) UPI is in L0p power mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -10245,16 +12545,20 @@
},
{
"BriefDescription": "Cycles in L0. Receive side.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCB",
"UMask": "0xe",
@@ -10262,8 +12566,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCB",
"UMask": "0x10e",
@@ -10271,8 +12577,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCS",
"UMask": "0xf",
@@ -10280,8 +12588,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCS",
"UMask": "0x10f",
@@ -10289,8 +12599,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQ Message Class",
"UMask": "0x8",
@@ -10298,8 +12610,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Request Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match REQ Opcodes - Specified in Umask[7:4]",
"UMask": "0x108",
@@ -10307,24 +12621,30 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1aa",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12a",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0xc",
@@ -10332,8 +12652,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0x10c",
@@ -10341,8 +12663,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - RSP",
"UMask": "0xa",
@@ -10350,8 +12674,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - RSP",
"UMask": "0x10a",
@@ -10359,8 +12685,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "SNP Message Class",
"UMask": "0x9",
@@ -10368,8 +12696,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Snoop Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match SNP Opcodes - Specified in Umask[7:4]",
"UMask": "0x109",
@@ -10377,8 +12707,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0xd",
@@ -10386,8 +12718,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0x10d",
@@ -10395,6 +12729,7 @@
},
{
"BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
"PerPkg": "1",
@@ -10404,6 +12739,7 @@
},
{
"BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
"PerPkg": "1",
@@ -10413,6 +12749,7 @@
},
{
"BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
"PerPkg": "1",
@@ -10422,30 +12759,37 @@
},
{
"BriefDescription": "VN0 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VN1 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "Valid data FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -10455,6 +12799,7 @@
},
{
"BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -10464,8 +12809,10 @@
},
{
"BriefDescription": "Valid Flits Received; Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
"UMask": "0x8",
@@ -10473,8 +12820,10 @@
},
{
"BriefDescription": "Valid Flits Received; Idle",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -10482,8 +12831,10 @@
},
{
"BriefDescription": "Valid Flits Received; LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -10491,8 +12842,10 @@
},
{
"BriefDescription": "Valid Flits Received; LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -10500,6 +12853,7 @@
},
{
"BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -10509,6 +12863,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.NULL",
@@ -10518,8 +12873,10 @@
},
{
"BriefDescription": "Valid Flits Received; Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -10527,17 +12884,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.PROT_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received; Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -10545,8 +12906,10 @@
},
{
"BriefDescription": "Valid Flits Received; Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -10554,8 +12917,10 @@
},
{
"BriefDescription": "Valid Flits Received; Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -10563,62 +12928,76 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "UPI"
},
{
"BriefDescription": "RxQ Flit Buffer Allocations; Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x1",
@@ -10626,8 +13005,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations; Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x2",
@@ -10635,8 +13016,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations; Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x4",
@@ -10644,8 +13027,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets; Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x1",
@@ -10653,8 +13038,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets; Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x2",
@@ -10662,8 +13049,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets; Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x4",
@@ -10671,118 +13060,147 @@
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in which the Tx of the Intel(R) Ultra Path Interconnect (UPI) is in L0p power mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -10791,30 +13209,38 @@
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0. Transmit side.",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCB",
"UMask": "0xe",
@@ -10822,8 +13248,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCB",
"UMask": "0x10e",
@@ -10831,8 +13259,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCS",
"UMask": "0xf",
@@ -10840,8 +13270,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCS",
"UMask": "0x10f",
@@ -10849,8 +13281,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQ Message Class",
"UMask": "0x8",
@@ -10858,8 +13292,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Request Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match REQ Opcodes - Specified in Umask[7:4]",
"UMask": "0x108",
@@ -10867,24 +13303,30 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1aa",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12a",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0xc",
@@ -10892,8 +13334,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0x10c",
@@ -10901,8 +13345,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - RSP",
"UMask": "0xa",
@@ -10910,8 +13356,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - RSP",
"UMask": "0x10a",
@@ -10919,8 +13367,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "SNP Message Class",
"UMask": "0x9",
@@ -10928,8 +13378,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Snoop Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match SNP Opcodes - Specified in Umask[7:4]",
"UMask": "0x109",
@@ -10937,8 +13389,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0xd",
@@ -10946,8 +13400,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0x10d",
@@ -10955,6 +13411,7 @@
},
{
"BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_UPI_TxL_BYPASSED",
"PerPkg": "1",
@@ -10963,6 +13420,7 @@
},
{
"BriefDescription": "Valid data FLITs transmitted via any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -10972,6 +13430,7 @@
},
{
"BriefDescription": "Null FLITs transmitted from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -10981,6 +13440,7 @@
},
{
"BriefDescription": "Valid Flits Sent; Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.DATA",
"PerPkg": "1",
@@ -10990,6 +13450,7 @@
},
{
"BriefDescription": "Idle FLITs transmitted",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.IDLE",
"PerPkg": "1",
@@ -10999,8 +13460,10 @@
},
{
"BriefDescription": "Valid Flits Sent; LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -11008,8 +13471,10 @@
},
{
"BriefDescription": "Valid Flits Sent; LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -11017,6 +13482,7 @@
},
{
"BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -11026,6 +13492,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.ALL_NULL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.NULL",
@@ -11035,8 +13502,10 @@
},
{
"BriefDescription": "Valid Flits Sent; Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -11044,17 +13513,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.PROT_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent; Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -11062,8 +13535,10 @@
},
{
"BriefDescription": "Valid Flits Sent; Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -11071,8 +13546,10 @@
},
{
"BriefDescription": "Valid Flits Sent; Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -11080,157 +13557,195 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.DATA_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.DUAL_SLOT_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.LOC",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.NON_DATA_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.REM",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.SGL_SLOT_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_UPI_TxL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
"Unit": "UPI"
},
{
"BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_U_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UBOX"
},
{
"BriefDescription": "Message Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x8",
@@ -11238,8 +13753,10 @@
},
{
"BriefDescription": "Message Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x10",
@@ -11247,8 +13764,10 @@
},
{
"BriefDescription": "Message Received; IPI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.; Inter Processor Interrupts",
"UMask": "0x4",
@@ -11256,8 +13775,10 @@
},
{
"BriefDescription": "Message Received; MSI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.; Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
"UMask": "0x2",
@@ -11265,8 +13786,10 @@
},
{
"BriefDescription": "Message Received; VLW",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x1",
@@ -11274,16 +13797,20 @@
},
{
"BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
"EventCode": "0x44",
"EventName": "UNC_U_LOCK_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
"Unit": "UBOX"
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PHOLD cycles.",
"UMask": "0x1",
@@ -11291,38 +13818,47 @@
},
{
"BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number outstanding register requests within message channel tracker",
"Unit": "UBOX"
},
{
"BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UPI_DATA_BANDWIDTH_TX",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json
index 743c91f3d2f0..bce46dd4f395 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "LLC_MISSES.PCIE_READ",
"FCMask": "0x07",
@@ -16,6 +17,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "LLC_MISSES.PCIE_WRITE",
"FCMask": "0x07",
@@ -31,6 +33,7 @@
},
{
"BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_IIO_CLOCKTICKS",
"PerPkg": "1",
@@ -39,6 +42,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
"FCMask": "0x4",
@@ -49,6 +53,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
"FCMask": "0x4",
@@ -59,6 +64,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
"FCMask": "0x4",
@@ -69,6 +75,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
"FCMask": "0x4",
@@ -79,6 +86,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
"FCMask": "0x4",
@@ -89,8 +97,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts; Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x01",
@@ -99,8 +109,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts; Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x02",
@@ -109,8 +121,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts; Port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x04",
@@ -119,8 +133,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts; Port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x08",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
"FCMask": "0x04",
@@ -138,6 +155,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
"FCMask": "0x04",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
"FCMask": "0x04",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
"FCMask": "0x04",
@@ -165,6 +185,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
"FCMask": "0x04",
@@ -174,8 +195,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -185,8 +208,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -196,8 +221,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -207,8 +234,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -218,8 +247,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -229,8 +260,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -240,8 +273,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -251,8 +286,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -262,8 +299,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -273,8 +312,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -284,8 +325,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -295,8 +338,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -306,8 +351,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -317,8 +364,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -328,8 +377,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -339,8 +390,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -350,8 +403,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -361,8 +416,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -372,8 +429,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -383,8 +442,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -394,8 +455,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -405,8 +468,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -416,8 +481,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -427,8 +494,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -438,6 +507,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -449,6 +519,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -460,6 +531,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -471,6 +543,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -482,8 +555,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -493,8 +568,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -504,6 +581,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -515,6 +593,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -526,6 +605,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -537,6 +617,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -548,8 +629,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -559,8 +642,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -570,6 +655,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
"FCMask": "0x07",
@@ -581,6 +667,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
"FCMask": "0x07",
@@ -592,6 +679,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
"FCMask": "0x07",
@@ -603,6 +691,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
"FCMask": "0x07",
@@ -614,8 +703,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -625,8 +716,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -636,6 +729,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
@@ -647,6 +741,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
@@ -658,6 +753,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
@@ -669,6 +765,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
@@ -680,8 +777,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -691,8 +790,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -702,8 +803,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -713,8 +816,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -724,8 +829,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -735,8 +842,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -746,8 +855,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -757,8 +868,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -768,8 +881,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -779,8 +894,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -790,8 +907,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -801,8 +920,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -812,6 +933,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -823,6 +945,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -834,6 +957,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -845,6 +969,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -856,8 +981,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -867,8 +994,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -878,6 +1007,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -889,6 +1019,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -900,6 +1031,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -911,6 +1043,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -922,8 +1055,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -933,8 +1068,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -944,8 +1081,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -955,8 +1094,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -966,8 +1107,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -977,8 +1120,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -988,8 +1133,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -999,8 +1146,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1010,6 +1159,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
"FCMask": "0x07",
@@ -1021,6 +1171,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
"FCMask": "0x07",
@@ -1032,6 +1183,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
"FCMask": "0x07",
@@ -1043,6 +1195,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
"FCMask": "0x07",
@@ -1054,8 +1207,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1065,8 +1220,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1076,6 +1233,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
@@ -1087,6 +1245,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
@@ -1098,6 +1257,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
@@ -1109,6 +1269,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
@@ -1120,8 +1281,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1131,8 +1294,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1142,29 +1307,37 @@
},
{
"BriefDescription": "Num Link Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_IIO_LINK_NUM_CORR_ERR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "Num Link Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_IIO_LINK_NUM_RETRIES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "Number packets that passed the Mask/Match Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_IIO_MASK_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x1",
@@ -1172,8 +1345,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x8",
@@ -1181,8 +1356,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x4",
@@ -1190,8 +1367,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus; PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x2",
@@ -1199,8 +1378,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x10",
@@ -1208,8 +1389,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x20",
@@ -1217,8 +1400,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x1",
@@ -1226,8 +1411,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x8",
@@ -1235,8 +1422,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x4",
@@ -1244,8 +1433,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x2",
@@ -1253,8 +1444,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x10",
@@ -1262,8 +1455,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x20",
@@ -1271,15 +1466,19 @@
},
{
"BriefDescription": "Counting disabled",
+ "Counter": "0,1,2,3",
"EventName": "UNC_IIO_NOTHING",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1288,9 +1487,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1299,9 +1500,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1310,9 +1513,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1321,9 +1526,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1332,9 +1539,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1343,9 +1552,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1354,9 +1565,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1365,9 +1578,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1376,9 +1591,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1387,6 +1604,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART0",
@@ -1398,6 +1616,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART1",
@@ -1409,6 +1628,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART2",
@@ -1420,6 +1640,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART3",
@@ -1431,9 +1652,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1442,9 +1665,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1453,6 +1678,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART0",
@@ -1464,6 +1690,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART1",
@@ -1475,6 +1702,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART2",
@@ -1486,6 +1714,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART3",
@@ -1497,9 +1726,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1508,9 +1739,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1519,9 +1752,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1530,9 +1765,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1541,9 +1778,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1552,9 +1791,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1563,9 +1804,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1574,9 +1817,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1585,9 +1830,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1596,9 +1843,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1607,9 +1856,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1618,9 +1869,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1629,9 +1882,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1640,9 +1895,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1651,9 +1908,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1662,9 +1921,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1673,9 +1934,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1684,9 +1947,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1695,9 +1960,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1706,9 +1973,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1717,9 +1986,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1728,9 +1999,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1739,9 +2012,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1750,9 +2025,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1761,9 +2038,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1772,9 +2051,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1783,9 +2064,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1794,9 +2077,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1805,9 +2090,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1816,9 +2103,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1827,9 +2116,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1838,9 +2129,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1849,9 +2142,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1860,9 +2155,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1871,9 +2168,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1882,9 +2181,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1893,9 +2194,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1904,9 +2207,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1915,9 +2220,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1926,9 +2233,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1937,9 +2246,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1948,9 +2259,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1959,9 +2272,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1970,9 +2285,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1981,9 +2298,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1992,9 +2311,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2003,9 +2324,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2014,9 +2337,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2025,9 +2350,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2036,9 +2363,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2047,9 +2376,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2058,9 +2389,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2069,9 +2402,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2080,9 +2415,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2091,9 +2428,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2102,9 +2441,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2113,9 +2454,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2124,9 +2467,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2135,9 +2480,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2146,9 +2493,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2157,9 +2506,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2168,9 +2519,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2179,9 +2532,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2190,9 +2545,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2201,9 +2558,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2212,9 +2571,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2223,9 +2584,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2234,9 +2597,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2245,17 +2610,21 @@
},
{
"BriefDescription": "Symbol Times on Link",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_IIO_SYMBOL_TIMES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS",
"Unit": "IIO"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2264,9 +2633,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2275,9 +2646,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2286,9 +2659,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2297,9 +2672,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2308,9 +2685,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2319,9 +2698,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2330,9 +2711,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2341,9 +2724,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2352,9 +2737,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2363,9 +2750,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2374,9 +2763,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2385,9 +2776,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2396,9 +2789,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2407,9 +2802,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2418,9 +2815,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2429,9 +2828,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2440,9 +2841,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2451,9 +2854,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2462,9 +2867,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2473,9 +2880,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2484,9 +2893,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2495,9 +2906,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2506,9 +2919,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2517,9 +2932,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2528,9 +2945,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2539,9 +2958,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2550,9 +2971,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2561,9 +2984,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2572,9 +2997,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2583,9 +3010,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2594,9 +3023,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2605,9 +3036,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2616,9 +3049,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2627,9 +3062,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2638,9 +3075,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2649,9 +3088,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2660,9 +3101,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2671,9 +3114,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2682,9 +3127,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2693,9 +3140,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2704,9 +3153,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2715,9 +3166,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2726,9 +3179,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2737,9 +3192,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2748,9 +3205,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2759,9 +3218,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2770,9 +3231,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2781,9 +3244,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2792,9 +3257,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2803,9 +3270,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2814,9 +3283,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2825,9 +3296,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2836,9 +3309,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2847,9 +3322,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2858,9 +3335,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2869,9 +3348,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2880,9 +3361,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2891,9 +3374,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2902,9 +3387,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2913,9 +3400,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2924,9 +3413,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2935,9 +3426,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2946,9 +3439,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2957,9 +3452,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2968,9 +3465,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2979,9 +3478,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2990,9 +3491,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3001,9 +3504,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3012,9 +3517,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -3023,9 +3530,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -3034,9 +3543,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -3045,9 +3556,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -3056,9 +3569,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3067,9 +3582,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3078,9 +3595,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -3089,9 +3608,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -3100,9 +3621,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -3111,9 +3634,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -3122,9 +3647,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3133,9 +3660,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3144,9 +3673,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -3155,9 +3686,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -3166,9 +3699,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -3177,9 +3712,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -3188,9 +3725,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3199,9 +3738,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3210,8 +3751,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3221,8 +3764,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3232,8 +3777,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3243,8 +3790,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3254,8 +3803,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3265,8 +3816,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3276,8 +3829,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3287,8 +3842,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3298,8 +3855,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3309,8 +3868,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3320,8 +3881,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3331,8 +3894,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3342,8 +3907,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3353,8 +3920,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3364,8 +3933,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3375,8 +3946,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3386,8 +3959,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3397,8 +3972,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3408,8 +3985,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3419,8 +3998,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3430,8 +4011,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3441,8 +4024,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3452,8 +4037,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3463,8 +4050,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3474,6 +4063,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -3485,6 +4075,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -3496,6 +4087,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -3507,6 +4099,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -3518,8 +4111,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3529,8 +4124,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3540,6 +4137,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -3551,6 +4149,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -3562,6 +4161,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -3573,6 +4173,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -3584,8 +4185,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3595,8 +4198,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3606,6 +4211,7 @@
},
{
"BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
"FCMask": "0x07",
@@ -3617,6 +4223,7 @@
},
{
"BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
"FCMask": "0x07",
@@ -3628,6 +4235,7 @@
},
{
"BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
"FCMask": "0x07",
@@ -3639,6 +4247,7 @@
},
{
"BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
"FCMask": "0x07",
@@ -3650,8 +4259,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3661,8 +4272,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3672,6 +4285,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
@@ -3683,6 +4297,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
@@ -3694,6 +4309,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
@@ -3705,6 +4321,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
@@ -3716,8 +4333,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3727,8 +4346,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3738,8 +4359,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3749,8 +4372,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3760,8 +4385,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3771,8 +4398,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3782,8 +4411,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3793,8 +4424,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3804,8 +4437,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3815,8 +4450,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3826,8 +4463,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3837,8 +4476,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3848,6 +4489,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -3859,6 +4501,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -3870,6 +4513,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -3881,6 +4525,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -3892,8 +4537,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3903,8 +4550,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3914,6 +4563,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -3925,6 +4575,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -3936,6 +4587,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -3947,6 +4599,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -3958,8 +4611,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3969,8 +4624,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3980,8 +4637,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3991,8 +4650,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -4002,8 +4663,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -4013,8 +4676,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -4024,8 +4689,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4035,8 +4702,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4046,6 +4715,7 @@
},
{
"BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
"FCMask": "0x07",
@@ -4057,6 +4727,7 @@
},
{
"BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
"FCMask": "0x07",
@@ -4068,6 +4739,7 @@
},
{
"BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
"FCMask": "0x07",
@@ -4079,6 +4751,7 @@
},
{
"BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
"FCMask": "0x07",
@@ -4090,8 +4763,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4101,8 +4776,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4112,6 +4789,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
@@ -4123,6 +4801,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
@@ -4134,6 +4813,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
@@ -4145,6 +4825,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
@@ -4156,8 +4837,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4167,8 +4850,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4178,72 +4863,90 @@
},
{
"BriefDescription": "VTd Access; context cache miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.CTXT_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; L1 miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.L1_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; L2 miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.L2_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; L3 miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.L3_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; Vtd hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.L4_PAGE_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; TLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.TLB1_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; TLB is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.TLB_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; TLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.TLB_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_VTD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
}
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
index d82d2cca6f0a..265cdf334f6a 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
@@ -21,8 +23,10 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.BYP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x8",
@@ -30,8 +34,10 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x1",
@@ -39,6 +45,7 @@
},
{
"BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.WR",
"PerPkg": "1",
@@ -48,30 +55,37 @@
},
{
"BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.ACT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.CAS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.PRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "All DRAM CAS Commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -81,6 +95,7 @@
},
{
"BriefDescription": "All DRAM Read CAS Commands issued (including underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -90,14 +105,17 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Read ISOCH Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
@@ -107,14 +125,17 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
@@ -124,14 +145,17 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "All DRAM Write CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -141,16 +165,20 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Write ISOCH Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
"UMask": "0x8",
@@ -158,6 +186,7 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
@@ -167,6 +196,7 @@
},
{
"BriefDescription": "Memory controller clock ticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts clockticks of the fixed frequency clock of the memory controller using one of the programmable counters.",
@@ -174,63 +204,79 @@
},
{
"BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_M_CLOCKTICKS_F",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that the precharge all command was sent.",
"Unit": "iMC"
},
{
"BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_MAJMODE2.DRAM_CYC",
+ "Counter": "0,1,2,3",
"EventCode": "0xED",
"EventName": "UNC_M_MAJMODE2.DRAM_CYC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_MAJMODE2.DRAM_ENTER",
+ "Counter": "0,1,2,3",
"EventCode": "0xED",
"EventName": "UNC_M_MAJMODE2.DRAM_ENTER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Major Mode 2 : Cycles in PMM major mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xED",
"EventName": "UNC_M_MAJMODE2.PMM_CYC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Major Mode 2 : Entered PMM major mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xED",
"EventName": "UNC_M_MAJMODE2.PMM_ENTER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
"UMask": "0x8",
@@ -238,8 +284,10 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
"UMask": "0x4",
@@ -247,8 +295,10 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
"UMask": "0x1",
@@ -256,8 +306,10 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
"UMask": "0x2",
@@ -265,6 +317,7 @@
},
{
"BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M_PMM_BANDWIDTH.READ",
"PerPkg": "1",
@@ -273,6 +326,7 @@
},
{
"BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M_PMM_BANDWIDTH.TOTAL",
"MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
@@ -283,6 +337,7 @@
},
{
"BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec). Derived from unc_m_pmm_wpq_inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xE7",
"EventName": "UNC_M_PMM_BANDWIDTH.WRITE",
"PerPkg": "1",
@@ -291,6 +346,7 @@
},
{
"BriefDescription": "All commands for Intel(R) Optane(TM) DC persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.ALL",
"PerPkg": "1",
@@ -299,22 +355,27 @@
},
{
"BriefDescription": "Misc Commands (error, flow ACKs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.MISC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Misc GNTs",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.MISC_GNT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Regular reads(RPQ) commands for Intel(R) Optane(TM) DC persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.RD",
"PerPkg": "1",
@@ -324,14 +385,17 @@
},
{
"BriefDescription": "RPQ GNTs",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.RPQ_GNTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Underfill read commands for Intel(R) Optane(TM) DC persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.UFILL_RD",
"PerPkg": "1",
@@ -341,14 +405,17 @@
},
{
"BriefDescription": "Underfill GNTs",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.WPQ_GNTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Write commands for Intel(R) Optane(TM) DC persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.WR",
"PerPkg": "1",
@@ -358,102 +425,127 @@
},
{
"BriefDescription": "Expected No data packet (ERID matched NDP encoding)",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.NODATA_EXP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Unexpected No data packet (ERID matched a Read, but data was a NDP)",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.NODATA_UNEXP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Opportunistic Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.OPP_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM ECC Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.PMM_ECC_ERROR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "PMM ERID detectable parity error",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.PMM_ERID_ERROR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Read Requests - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.REQS_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Read Requests - Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.REQS_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Major Mode; Cycles PMM is in Partial Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xEC",
"EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_CYC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xEC",
"EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_ENTER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xEC",
"EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_EXIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Major Mode; Cycles PMM is in Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xEC",
"EventName": "UNC_M_PMM_MAJMODE1.RD_CYC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Major Mode; Cycles PMM is in Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xEC",
"EventName": "UNC_M_PMM_MAJMODE1.WR_CYC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Intel Optane DC persistent memory read latency (ns). Derived from unc_m_pmm_rpq_occupancy.all",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_READ_LATENCY",
"MetricExpr": "UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS / UNC_M_CLOCKTICKS",
@@ -465,20 +557,25 @@
},
{
"BriefDescription": "PMM Read Queue Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M_PMM_RPQ_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Read Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M_PMM_RPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M_PMM_RPQ_INSERTS",
"PerPkg": "1",
@@ -486,6 +583,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy of all read requests for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -494,28 +592,35 @@
},
{
"BriefDescription": "PMM Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Write Queue Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_M_PMM_WPQ_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Write Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xE7",
"EventName": "UNC_M_PMM_WPQ_INSERTS",
"PerPkg": "1",
@@ -523,6 +628,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy of all write requests for Intel(R) Optane(TM) DC persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -531,44 +637,55 @@
},
{
"BriefDescription": "PMM Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PMM_WPQ_PCOMMIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "UNC_M_PMM_WPQ_PCOMMIT",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PMM_WPQ_PCOMMIT_CYC",
+ "Counter": "0,1,2,3",
"EventCode": "0xE9",
"EventName": "UNC_M_PMM_WPQ_PCOMMIT_CYC",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode+C37",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100",
@@ -579,8 +696,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x1",
@@ -588,8 +707,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x2",
@@ -597,8 +718,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x4",
@@ -606,8 +729,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x8",
@@ -615,8 +740,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x10",
@@ -624,8 +751,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x20",
@@ -633,8 +762,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x40",
@@ -642,8 +773,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x80",
@@ -651,21 +784,26 @@
},
{
"BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M_POWER_PCU_THROTTLING",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles Memory is in self refresh power mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100",
@@ -676,8 +814,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
"UMask": "0x1",
@@ -685,8 +825,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
@@ -694,8 +836,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x4",
@@ -703,8 +847,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x8",
@@ -712,8 +858,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x10",
@@ -721,8 +869,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x20",
@@ -730,8 +880,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x40",
@@ -739,8 +891,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x80",
@@ -748,8 +902,10 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
"UMask": "0x1",
@@ -757,8 +913,10 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
"UMask": "0x2",
@@ -766,8 +924,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.BYP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x10",
@@ -775,8 +935,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
"UMask": "0x2",
@@ -784,6 +946,7 @@
},
{
"BriefDescription": "Pre-charges due to page misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
@@ -793,6 +956,7 @@
},
{
"BriefDescription": "Pre-charge for reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -802,8 +966,10 @@
},
{
"BriefDescription": "Pre-charge for writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x8",
@@ -811,1390 +977,1739 @@
},
{
"BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
"Unit": "iMC"
},
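The description above derives average read-queue occupancy by dividing the occupancy accumulation (UNC_M_RPQ_OCCUPANCY, further down in this file) by the not-empty cycle count. A minimal sketch of that arithmetic, with purely hypothetical counter readings taken over the same interval:

    # Hypothetical raw readings over one measurement interval (illustrative values only).
    rpq_occupancy_sum = 1_200_000   # UNC_M_RPQ_OCCUPANCY: queue entries accumulated per cycle
    rpq_cycles_ne     = 300_000     # UNC_M_RPQ_CYCLES_NE: cycles the queue was not empty

    # Average number of entries present while the RPQ held at least one request.
    avg_rpq_occupancy = rpq_occupancy_sum / rpq_cycles_ne if rpq_cycles_ne else 0.0
    print(f"average RPQ occupancy: {avg_rpq_occupancy:.2f} entries")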
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -2203,6 +2718,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
@@ -2211,452 +2727,565 @@
},
{
"BriefDescription": "Scoreboard Accesses; Write Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses; Write Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses; FM read completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses; FM write completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses; Read Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses; Read Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses; NM read completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses; NM write completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Alloc",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.ALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Dealloc",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Far Mem Read Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.FMRD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Far Mem Write Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.FMWR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Near Mem Read Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.NMRD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Near Mem Write Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.NMWR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Valid",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.VLD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M_SB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Cycles Not-Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M_SB_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts; Block region reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts; Block region writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts; Dealloc all commands (for error flows)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts; Patrol inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.PATROL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts; Persistent Mem reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts; Persistent Mem writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts; Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts; Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy; Block region reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy; Block region writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy; Patrol",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.PATROL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy; Persistent Mem reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy; Persistent Mem writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy; Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy; Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected; FM requests rejected due to full address conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected; NM requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected; Patrol requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Far Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.FMRD_CLR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Far Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.FMRD_SET",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Far Mem Write - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.FMWR_CLR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Far Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.FMWR_SET",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.NMRD_CLR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Near Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.NMRD_SET",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Near Mem Write - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.NMWR_CLR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Near Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.NMWR_SET",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Far Mem Read",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.FMRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Far Mem Write",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.FMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Near Mem Read",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.NMRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Near Mem Write",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.NMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.NEW",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.OCC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "All hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.HIT",
"PerPkg": "1",
@@ -2666,6 +3295,7 @@
},
{
"BriefDescription": "All Clean line misses to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.MISS_CLEAN",
"PerPkg": "1",
@@ -2675,6 +3305,7 @@
},
{
"BriefDescription": "All dirty line misses to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.MISS_DIRTY",
"PerPkg": "1",
@@ -2684,46 +3315,57 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
"Unit": "iMC"
},
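The average WPQ occupancy is formed the same way from UNC_M_WPQ_OCCUPANCY and UNC_M_WPQ_CYCLES_NE. A minimal sketch of collecting both counters with perf stat and computing the ratio, assuming the event aliases from this file are visible to perf and that its CSV output (-x,) places the counter value in the first field; the workload and the parsing are illustrative, not a definitive recipe:

    import subprocess

    events = ["unc_m_wpq_occupancy", "unc_m_wpq_cycles_ne"]
    cmd = ["perf", "stat", "-a", "-x,", "-e", ",".join(events), "--", "sleep", "1"]
    # perf stat prints its counts on stderr.
    output = subprocess.run(cmd, capture_output=True, text=True).stderr

    counts = {}
    for line in output.splitlines():
        value = line.split(",")[0]
        for ev in events:
            # Skip "<not counted>" / "<not supported>" lines; aggregation across
            # multiple iMC instances is deliberately simplified here.
            if ev in line and value.replace(".", "", 1).isdigit():
                counts[ev] = float(value)

    if counts.get("unc_m_wpq_cycles_ne"):
        avg = counts["unc_m_wpq_occupancy"] / counts["unc_m_wpq_cycles_ne"]
        print(f"average WPQ occupancy: {avg:.2f} entries")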
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
@@ -2732,6 +3374,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
@@ -2740,1359 +3383,1701 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
},
{
"BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_M_WRONG_MM",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json
index ceef46046488..809b86dde933 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json
@@ -1,147 +1,185 @@
[
{
"BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent in phase-shedding power state 0",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent in phase-shedding power state 1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent in phase-shedding power state 2",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent in phase-shedding power state 3",
"Unit": "PCU"
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_MCP_PROCHOT_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_MCP_PROCHOT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0x40",
@@ -149,8 +187,10 @@
},
{
"BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0x80",
@@ -158,8 +198,10 @@
},
{
"BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0xc0",
@@ -167,32 +209,40 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
}
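
Every event in the hunks above gains a "Counter" field (the general-purpose counters it may use) and, for the uncore events, an "Experimental" marker. A minimal Python sketch follows, assuming the JSON file exists at the in-tree path shown; it is not the kernel's jevents.py, just an illustration of consuming the two new fields by skipping experimental events and grouping the rest by their counter constraint.

#!/usr/bin/env python3
# Minimal sketch (illustration only): read one of the pmu-events JSON files
# touched above, drop events marked Experimental, and group the remainder by
# which counters they can be scheduled on.
import json
from collections import defaultdict

path = "tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json"  # assumed path

with open(path) as f:
    events = json.load(f)

by_counter = defaultdict(list)
for ev in events:
    if ev.get("Experimental") == "1":
        continue                                   # skip events flagged experimental
    by_counter[ev.get("Counter", "")].append(ev["EventName"])

for counters, names in sorted(by_counter.items()):
    print(f"counters {counters or '?'}: {len(names)} event(s)")
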
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
index 73feadaf7674..ad33fff57c03 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -82,6 +92,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
@@ -155,6 +174,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -162,6 +182,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -171,6 +192,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -179,6 +201,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -187,6 +210,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -195,6 +219,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -203,6 +228,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -211,6 +237,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -219,6 +246,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json b/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
index c6be60584522..7882dca9d5e1 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of core requests (demand and L1 prefetchers) rejected by the L2 queue (L2Q) due to a full condition.",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "CORE_REJECT_L2Q.ANY",
"PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2 queue (L2Q) due to a full or nearly full condition, which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the External Queue (XQ), but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a cores dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event). Counts on a per core basis.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "DL1.DIRTY_EVICTION",
"PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition.",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "L2_REJECT_XQ.ANY",
"PublicDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBL (L2 misses) and WOB (L2 write-back victims).",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Counts the total number of L2 Cache accesses. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PublicDescription": "Counts the total number of L2 Cache Accesses, includes hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only. Counts on a per core basis.",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Counts the number of L2 Cache accesses that resulted in a hit. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.HIT",
"PublicDescription": "Counts the number of L2 Cache accesses that resulted in a hit from a front door request only (does not include rejects or recycles), Counts on a per core basis.",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Counts the number of L2 Cache accesses that resulted in a miss. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.MISS",
"PublicDescription": "Counts the number of L2 Cache accesses that resulted in a miss from a front door request only (does not include rejects or recycles). Counts on a per core basis.",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Counts the number of L2 Cache accesses that miss the L2 and get rejected. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.REJECTS",
"PublicDescription": "Counts the number of L2 Cache accesses that miss the L2 and get BBL reject short and long rejects (includes those counted in L2_reject_XQ.any). Counts on a per core basis.",
@@ -54,6 +61,7 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
@@ -62,6 +70,7 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
@@ -78,6 +88,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
@@ -86,6 +97,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
@@ -94,6 +106,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
@@ -102,6 +115,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD",
"SampleAfterValue": "200003",
@@ -109,6 +123,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
"SampleAfterValue": "200003",
@@ -116,6 +131,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
"SampleAfterValue": "200003",
@@ -123,6 +139,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the LLC or other core with HITE/F/M.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
@@ -131,6 +148,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a store buffer being full.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.STORE_BUFFER_FULL",
"SampleAfterValue": "200003",
@@ -138,6 +156,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
@@ -147,6 +166,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core or module.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
@@ -156,6 +176,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L1 data cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
@@ -165,6 +186,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that miss in the L1 data cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
@@ -174,6 +196,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
@@ -183,6 +206,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that miss in the L2 cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
@@ -192,6 +216,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
@@ -201,6 +226,7 @@
},
{
"BriefDescription": "Counts the number of memory uops retired.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL",
@@ -211,6 +237,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -221,6 +248,7 @@
},
{
"BriefDescription": "Counts the number of store uops retired.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -231,6 +259,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that performed one or more locks.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
@@ -249,6 +279,7 @@
},
{
"BriefDescription": "Counts the number of retired split load uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -258,6 +289,7 @@
},
{
"BriefDescription": "Counts the number of retired split store uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
@@ -267,6 +299,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -276,6 +309,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -285,6 +319,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -303,6 +339,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -312,6 +349,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -321,6 +359,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -330,6 +369,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -339,6 +379,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -348,6 +389,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -357,6 +399,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -366,6 +409,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -375,6 +419,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -384,6 +429,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -393,6 +439,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -402,6 +449,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -411,6 +459,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -420,6 +469,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -429,6 +479,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -438,6 +489,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
@@ -448,6 +500,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
@@ -458,6 +511,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
@@ -468,6 +522,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -478,6 +533,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
@@ -488,6 +544,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
@@ -498,6 +555,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -507,6 +565,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -516,6 +575,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -525,6 +585,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -534,6 +595,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -543,6 +605,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -552,6 +615,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.FULL_STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -561,6 +625,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -570,6 +635,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -579,6 +645,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -588,6 +655,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -597,6 +665,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -606,6 +675,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -615,6 +685,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -624,6 +695,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -633,6 +705,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -642,6 +715,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -651,6 +725,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -660,6 +735,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -669,6 +745,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -678,6 +755,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -687,6 +765,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -696,6 +775,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -705,6 +785,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -714,6 +795,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -723,6 +805,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -732,6 +815,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L1WB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -741,6 +825,7 @@
},
{
"BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L2WB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -750,6 +835,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -759,6 +845,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -768,6 +855,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -777,6 +865,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -786,6 +875,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -795,6 +885,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -804,6 +895,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -813,6 +905,7 @@
},
{
"BriefDescription": "Counts streaming stores that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -822,6 +915,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -831,6 +925,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -840,6 +935,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -849,6 +945,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -858,6 +955,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -867,6 +965,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -876,6 +975,7 @@
},
{
"BriefDescription": "Counts uncached memory writes that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -885,6 +985,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/counter.json b/tools/perf/pmu-events/arch/x86/elkhartlake/counter.json
new file mode 100644
index 000000000000..aa443347b694
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json b/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
index 88522244b760..79a4beba4b78 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles the floating point divider is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.FPDIV",
"PublicDescription": "Counts the number of cycles the floating point divider is busy. Does not imply a stall waiting for the divider.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json b/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json
index 5ba998e06592..6d131ed90242 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of BACLEARS due to a conditional jump.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.COND",
"SampleAfterValue": "200003",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of BACLEARS due to an indirect branch.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.INDIRECT",
"SampleAfterValue": "200003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Counts the number of BACLEARS due to a return branch.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.RETURN",
"SampleAfterValue": "200003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Counts the number of BACLEARS due to a direct, unconditional jump.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.UNCOND",
"SampleAfterValue": "200003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Counts the number of times a decode restriction reduces the decode throughput due to wrong instruction length prediction.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe9",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
"SampleAfterValue": "200003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Counts the number of requests to the instruction cache for one or more bytes of a cache line.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
@@ -52,6 +59,7 @@
},
{
"BriefDescription": "Counts the number of instruction cache hits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "Counts the number of requests that hit in the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Counts the number of instruction cache misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json b/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
index c02eb0e836ad..34306ec24e9b 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "20003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of misaligned load uops that are 4K page splits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
"PEBS": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of misaligned store uops that are 4K page splits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"PEBS": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -69,6 +77,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -78,6 +87,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -96,6 +107,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.FULL_STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.FULL_STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L1WB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L1WB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L2WB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L2WB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.OTHER.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -251,6 +279,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.OTHER.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -260,6 +289,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -269,6 +299,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -278,6 +309,7 @@
},
{
"BriefDescription": "Counts all hardware and software prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PREFETCHES.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -287,6 +319,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -296,6 +329,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -305,6 +339,7 @@
},
{
"BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -314,6 +349,7 @@
},
{
"BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -323,6 +359,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -332,6 +369,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -341,6 +379,7 @@
},
{
"BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -350,6 +389,7 @@
},
{
"BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
index fefbc383b840..57613207f7ad 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.SELF_LOCKS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EdgeDetect": "1",
"EventCode": "0x63",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock issued by other cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "BUS_LOCK.BLOCK_CYCLES",
"PublicDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock issued by other cores. Counts on a per core basis.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.BLOCK_CYCLES",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "BUS_LOCK.CYCLES_OTHER_BLOCK",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.LOCK_CYCLES",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "BUS_LOCK.CYCLES_SELF_BLOCK",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock it issued.",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "BUS_LOCK.LOCK_CYCLES",
"PublicDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock it issued. Counts on a per core basis.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts the number of bus locks a core issued its self (e.g. lock to UC or Split Lock) and does not include cache locks.",
+ "Counter": "0,1,2,3",
"EdgeDetect": "1",
"EventCode": "0x63",
"EventName": "BUS_LOCK.SELF_LOCKS",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_DRAM_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_DRAM_HIT",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_L2_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_L2_HIT",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_LLC_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_LLC_HIT",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Counts the number of core cycles during which interrupts are masked (disabled).",
+ "Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.MASKED",
"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled).",
+ "Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"PublicDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled). Increments by 1 each core cycle that both EFLAGS.IF is 0 and an INTR is pending (which means the APIC is telling the ROB to cause an INTR). This event does not increment if EFLAGS.IF is 0 but all interrupt in the APICs Interrupt Request Register (IRR) are inhibited by the PPR (thus either by ISRV or TPR) because in these cases the interrupts would be held up in the APIC and would not be pended to the ROB. This event does count when an interrupt is only inhibited by MOV/POP SS state machines or the STI state machine. These extra inhibits only last for a single instructions and would not be important.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Counts the number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.RECEIVED",
"SampleAfterValue": "203",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Counts all code reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -114,6 +128,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Counts all code reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -141,6 +158,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -150,6 +168,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -159,6 +178,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -168,6 +188,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -177,6 +198,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -186,6 +208,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -195,6 +218,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -204,6 +228,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
@@ -223,6 +249,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
@@ -233,6 +260,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
@@ -243,6 +271,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
@@ -253,6 +282,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -262,6 +292,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -271,6 +302,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -280,6 +312,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -289,6 +322,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -298,6 +332,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -307,6 +342,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -316,6 +352,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -325,6 +362,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -334,6 +372,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -343,6 +382,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -352,6 +392,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -361,6 +402,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -370,6 +412,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -379,6 +422,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -388,6 +432,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -397,6 +442,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -406,6 +452,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L1WB_M.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -415,6 +462,7 @@
},
{
"BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L2WB_M.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -424,6 +472,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -433,6 +482,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -442,6 +492,7 @@
},
{
"BriefDescription": "Counts all hardware and software prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PREFETCHES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -451,6 +502,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -460,6 +512,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -469,6 +522,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -478,6 +532,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -487,6 +542,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -496,6 +552,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -505,6 +562,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -514,6 +572,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -523,6 +582,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -532,6 +592,7 @@
},
{
"BriefDescription": "Counts uncached memory writes that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
index c483c0838e08..e4e7902c1162 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "1",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "1",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "Counts the total number of BTCLEARS.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe8",
"EventName": "BTCLEAR.ANY",
"PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
"SampleAfterValue": "2000003",
@@ -135,6 +152,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
@@ -142,6 +160,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
@@ -150,6 +169,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
"SampleAfterValue": "2000003",
@@ -157,6 +177,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
@@ -165,6 +186,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.ANY",
@@ -172,6 +194,7 @@
},
{
"BriefDescription": "Counts the number of cycles the integer divider is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.IDIV",
"PublicDescription": "Counts the number of cycles the integer divider is busy. Does not imply a stall waiting for the divider.",
@@ -180,6 +203,7 @@
},
{
"BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
@@ -188,6 +212,7 @@
},
{
"BriefDescription": "Counts the total number of instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -196,6 +221,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.4K_ALIAS",
"PEBS": "1",
@@ -204,6 +230,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks).",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL",
"PEBS": "1",
@@ -212,6 +239,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "1",
@@ -220,6 +248,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "1",
@@ -228,12 +257,14 @@
},
{
"BriefDescription": "Counts the total number of machine clears for any reason including, but not limited to, memory ordering, memory disambiguation, SMC, and FP assist.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.ANY",
"SampleAfterValue": "20003"
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"SampleAfterValue": "20003",
@@ -241,6 +272,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"SampleAfterValue": "20003",
@@ -248,6 +280,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20003",
@@ -255,6 +288,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
@@ -263,6 +297,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
"SampleAfterValue": "1000003",
@@ -270,6 +305,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
"SampleAfterValue": "1000003",
@@ -277,6 +313,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to branch mispredicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
"SampleAfterValue": "1000003",
@@ -284,6 +321,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MONUKE",
@@ -292,12 +330,14 @@
},
{
"BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to backend stalls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions.",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
"SampleAfterValue": "1000003",
@@ -305,6 +345,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -312,6 +353,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -319,6 +361,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
"SampleAfterValue": "1000003",
@@ -326,6 +369,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
"SampleAfterValue": "1000003",
@@ -333,6 +377,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
"SampleAfterValue": "1000003",
@@ -340,6 +385,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.STORE_BUFFER",
@@ -348,12 +394,14 @@
},
{
"BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -362,6 +410,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
@@ -370,6 +419,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
"SampleAfterValue": "1000003",
@@ -377,6 +427,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stalls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
"SampleAfterValue": "1000003",
@@ -384,6 +435,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
@@ -392,6 +444,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
"SampleAfterValue": "1000003",
@@ -399,6 +452,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to wrong predecodes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
"SampleAfterValue": "1000003",
@@ -406,6 +460,7 @@
},
{
"BriefDescription": "Counts the total number of consumed retirement slots.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "TOPDOWN_RETIRING.ALL",
"PEBS": "1",
@@ -413,6 +468,7 @@
},
{
"BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
@@ -420,6 +476,7 @@
},
{
"BriefDescription": "Counts the total number of uops retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -427,6 +484,7 @@
},
{
"BriefDescription": "Counts the number of integer divide uops retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
"PEBS": "1",
@@ -435,6 +493,7 @@
},
{
"BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"PEBS": "1",
@@ -444,6 +503,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
index cabe29e70e79..f9a6caed8776 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of page walks due to loads that miss the PDE (Page Directory Entry) cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
"SampleAfterValue": "200003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Account for all page sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "200003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1GB pages. Includes page walks that page fault.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -47,6 +53,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Counts the number of page walks due to stores that miss the PDE (Page Directory Entry) cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
"SampleAfterValue": "2000003",
@@ -62,6 +70,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Account for all pages sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -69,6 +78,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
@@ -85,6 +96,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -93,6 +105,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -101,6 +114,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -109,6 +123,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Directory Entry hits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDE_HIT",
"PublicDescription": "Counts the number of Extended Page Directory Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -117,6 +132,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Directory Entry misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDE_MISS",
"PublicDescription": "Counts the number Extended Page Directory Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -125,6 +141,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Directory Pointer Entry hits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDPE_HIT",
"PublicDescription": "Counts the number Extended Page Directory Pointer Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -133,6 +150,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Directory Pointer Entry misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDPE_MISS",
"PublicDescription": "Counts the number Extended Page Directory Pointer Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -141,6 +159,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -149,6 +168,7 @@
},
{
"BriefDescription": "Counts the number of times there was an ITLB miss and a new translation was filled into the ITLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "ITLB.FILLS",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) and a new translation was filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
@@ -157,6 +177,7 @@
},
{
"BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.PDE_CACHE_MISS",
"SampleAfterValue": "2000003",
@@ -164,6 +185,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -171,6 +193,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -179,6 +202,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
@@ -187,6 +211,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -195,6 +220,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -203,6 +229,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk).",
@@ -211,6 +238,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DTLB_MISS",
"PEBS": "1",
@@ -219,6 +247,7 @@
},
{
"BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
@@ -228,6 +257,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
@@ -237,6 +267,7 @@
},
{
"BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
index ab09bd9fb409..21d5d96b8a6d 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D.HWPF_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.HWPF_MISS",
"SampleAfterValue": "1000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x48",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event L1D_PEND_MISS.L2_STALLS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALL",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALLS",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -74,14 +83,17 @@
"UMask": "0x1f"
},
{
- "BriefDescription": "L2_LINES_OUT.NON_SILENT",
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.SILENT",
"PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
@@ -90,6 +102,7 @@
},
{
"BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.USELESS_HWPF",
"PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
@@ -106,6 +120,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_RQSTS.MISS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
@@ -114,6 +129,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -122,6 +138,7 @@
},
{
"BriefDescription": "Demand Data Read access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -130,6 +147,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Counts demand requests that miss L2 cache.",
@@ -138,6 +156,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PublicDescription": "Counts demand requests to L2 cache.",
@@ -146,6 +165,7 @@
},
{
"BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_HWPF",
"SampleAfterValue": "200003",
@@ -153,6 +173,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -161,6 +182,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -169,6 +191,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -177,6 +200,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
@@ -185,6 +209,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -193,6 +218,7 @@
},
{
"BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.HWPF_MISS",
"SampleAfterValue": "200003",
@@ -200,6 +226,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_REQUEST.MISS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
@@ -208,6 +235,7 @@
},
{
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
@@ -216,6 +244,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -224,6 +253,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -232,6 +262,7 @@
},
{
"BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_HIT",
"PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -240,6 +271,7 @@
},
{
"BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_MISS",
"PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -247,7 +279,17 @@
"UMask": "0x28"
},
{
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -256,6 +298,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -264,6 +307,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -274,6 +318,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -284,6 +329,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -294,6 +340,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -304,6 +351,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -314,6 +362,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -324,6 +373,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -334,6 +384,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -344,6 +395,7 @@
},
{
"BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
"PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
@@ -352,6 +404,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
@@ -362,6 +415,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -372,6 +426,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -382,6 +437,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
@@ -392,6 +448,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
@@ -402,6 +459,7 @@
},
{
"BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
@@ -411,6 +469,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
@@ -421,6 +480,7 @@
},
{
"BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
@@ -430,6 +490,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -440,6 +501,7 @@
},
{
"BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -450,6 +512,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -460,6 +523,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -470,6 +534,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -480,6 +545,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -490,6 +556,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -500,6 +567,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -510,6 +578,7 @@
},
{
"BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "MEM_STORE_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
@@ -517,6 +586,7 @@
},
{
"BriefDescription": "Retired memory uops for any access",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe5",
"EventName": "MEM_UOP_RETIRED.ANY",
"PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
@@ -525,6 +595,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -534,6 +605,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -543,6 +615,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -552,6 +625,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -561,6 +635,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -570,6 +645,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -579,6 +655,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -588,6 +665,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -597,6 +675,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -606,6 +685,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -615,6 +695,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -624,6 +705,7 @@
},
{
"BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -633,6 +715,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -642,6 +725,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -651,6 +735,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -660,6 +745,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -669,6 +755,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -678,6 +765,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -687,6 +775,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -696,6 +785,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -705,6 +795,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -714,6 +805,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -723,6 +815,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -732,6 +825,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -741,6 +835,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -750,6 +845,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -759,6 +855,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.RFO_TO_CORE.L3_HIT_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -768,6 +865,7 @@
},
{
"BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -777,6 +875,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"SampleAfterValue": "100003",
@@ -784,6 +883,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -791,7 +891,17 @@
"UMask": "0x8"
},
{
+ "BriefDescription": "Cacheable and noncacheable code read requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -799,7 +909,17 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
@@ -808,6 +928,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -815,7 +936,18 @@
"UMask": "0x8"
},
{
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -824,6 +956,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -832,13 +965,24 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
"SampleAfterValue": "1000003",
"UMask": "0x8"
},
{
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -847,6 +991,7 @@
},
{
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "SQ_MISC.BUS_LOCK",
"PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
@@ -854,7 +999,16 @@
"UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
@@ -863,6 +1017,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
@@ -871,6 +1026,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.T0",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
@@ -879,6 +1035,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/counter.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/counter.json
new file mode 100644
index 000000000000..088d5954747c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/counter.json
@@ -0,0 +1,82 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "M2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2M",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M3UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CXLCM",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "CXLDP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "MCHBM",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2HBM",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "MDF",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ }
+] \ No newline at end of file
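counter.json records, per PMU unit, how many fixed and general-purpose counters the platform exposes; the "Counter" fields in the event files reference those general-purpose counters by index. A small sketch, assuming cache.json and counter.json are available in the current directory, of cross-checking the two:

    import json

    def generic_counters(unit_name, counter_file="counter.json"):
        # counter.json is a list of {"Unit", "CountersNumFixed", "CountersNumGeneric"} objects.
        for unit in json.load(open(counter_file)):
            if unit["Unit"] == unit_name:
                return int(unit["CountersNumGeneric"])
        raise ValueError(f"unit {unit_name!r} not found in {counter_file}")

    def check(event_file="cache.json", unit="core"):
        limit = generic_counters(unit)
        for ev in json.load(open(event_file)):
            # "Counter": "0,1,2,3" -> every numeric index must exist on this unit.
            for c in ev.get("Counter", "").split(","):
                if c.strip().isdigit() and int(c) >= limit:
                    print(f'{ev["EventName"]}: counter {c} exceeds the {limit} generic counters')

    if __name__ == "__main__":
        check()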
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json
new file mode 100644
index 000000000000..ee288099a8d3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json
@@ -0,0 +1,2186 @@
+[
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ / 1e9",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Percentage of time spent in the active CPU power state C0",
+ "MetricExpr": "tma_info_system_cpus_utilized",
+ "MetricName": "cpu_utilization",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_2mb_large_page_load_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the Data Translation Lookaside Buffer (DTLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_load_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_store_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Bandwidth observed by the integrated I/O traffic controller (IIO) of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS * 4 / 1e6 / duration_time",
+ "MetricName": "iio_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth observed by the integrated I/O traffic controller (IIO) of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS * 4 / 1e6 / duration_time",
+ "MetricName": "iio_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_local",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from a remote CPU socket.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_remote",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_local",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to a remote CPU socket.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_remote",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Percentage of inbound full cacheline writes initiated by end device controllers that miss the L3 cache.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "MetricName": "io_percent_of_inbound_full_writes_that_miss_l3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of inbound partial cacheline writes initiated by end device controllers that miss the L3 cache.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_RFO) / (UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_RFO)",
+ "MetricName": "io_percent_of_inbound_partial_writes_that_miss_l3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of inbound reads initiated by end device controllers that miss the L3 cache.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR / UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "MetricName": "io_percent_of_inbound_reads_that_miss_l3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricName": "itlb_2nd_level_large_page_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "itlb_2nd_level_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.ALL_CODE_RD / INST_RETIRED.ANY",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_RETIRED.L1_HIT / INST_RETIRED.ANY",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L1 data cache (includes data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L1D.REPLACEMENT / INST_RETIRED.ANY",
+ "MetricName": "l1d_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricName": "l2_demand_code_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT / INST_RETIRED.ANY",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_LINES_IN.ALL / INST_RETIRED.ANY",
+ "MetricName": "l2_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD / INST_RETIRED.ANY",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA + UNC_CHA_TOR_INSERTS.IA_MISS_DRD + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF) / INST_RETIRED.ANY",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) in nano seconds",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD) * #num_packages)) * duration_time",
+ "MetricName": "llc_demand_data_read_miss_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to local memory in nano seconds",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL) / (UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL) * #num_packages)) * duration_time",
+ "MetricName": "llc_demand_data_read_miss_latency_for_local_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to remote memory in nano seconds",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE) / (UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE) * #num_packages)) * duration_time",
+ "MetricName": "llc_demand_data_read_miss_latency_for_remote_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to DRAM in nano seconds",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / (UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR) * #num_packages)) * duration_time",
+ "MetricName": "llc_demand_data_read_miss_to_dram_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to Intel(R) Optane(TM) Persistent Memory(PMEM) in nano seconds",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM) / (UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM) * #num_packages)) * duration_time",
+ "MetricName": "llc_demand_data_read_miss_to_pmem_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "UNC_CHA_REQUESTS.READS_LOCAL * 64 / 1e6 / duration_time",
+ "MetricName": "llc_miss_local_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "UNC_CHA_REQUESTS.WRITES_LOCAL * 64 / 1e6 / duration_time",
+ "MetricName": "llc_miss_local_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "UNC_CHA_REQUESTS.READS_REMOTE * 64 / 1e6 / duration_time",
+ "MetricName": "llc_miss_remote_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "UNC_CHA_REQUESTS.WRITES_REMOTE * 64 / 1e6 / duration_time",
+ "MetricName": "llc_miss_remote_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricName": "loads_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "UNC_M_CAS_COUNT.RD * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "UNC_M_CAS_COUNT.WR * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Memory write bandwidth (MB/sec) caused by directory updates; includes DDR and Intel(R) Optane(TM) Persistent Memory(PMEM).",
+ "MetricExpr": "(UNC_CHA_DIR_UPDATE.HA + UNC_CHA_DIR_UPDATE.TOR + UNC_M2M_DIRECTORY_UPDATE.ANY) * 64 / 1e6 / duration_time",
+ "MetricName": "memory_extra_write_bw_due_to_directory_updates",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL) / (UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE)",
+ "MetricName": "numa_reads_addressed_to_local_dram",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Memory reads that miss the last level cache (LLC) addressed to remote DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE) / (UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE)",
+ "MetricName": "numa_reads_addressed_to_remote_dram",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uops delivered from decoded instruction cache (decoded stream buffer or DSB) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS)",
+ "MetricName": "percent_uops_delivered_from_decoded_icache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uops delivered from legacy decode pipeline (Micro-instruction Translation Engine or MITE) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "IDQ.MITE_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS)",
+ "MetricName": "percent_uops_delivered_from_legacy_decode_pipeline",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uops delivered from microcode sequencer (MS) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "IDQ.MS_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS)",
+ "MetricName": "percent_uops_delivered_from_microcode_sequencer",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory read bandwidth (MB/sec)",
+ "MetricExpr": "UNC_M_PMM_RPQ_INSERTS * 64 / 1e6 / duration_time",
+ "MetricName": "pmem_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory bandwidth (MB/sec)",
+ "MetricExpr": "(UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS) * 64 / 1e6 / duration_time",
+ "MetricName": "pmem_memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory write bandwidth (MB/sec)",
+ "MetricExpr": "UNC_M_PMM_WPQ_INSERTS * 64 / 1e6 / duration_time",
+ "MetricName": "pmem_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory store instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_STORES / INST_RETIRED.ANY",
+ "MetricName": "stores_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5_11 + UOPS_DISPATCHED.PORT_6) / (5 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the Advanced Matrix eXtensions (AMX) execution engine was busy with tile (arithmetic) operations",
+ "MetricExpr": "EXE.AMX_BUSY / tma_info_core_core_clks",
+ "MetricGroup": "BvCB;Compute;HPC;Server;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_amx_busy",
+ "MetricThreshold": "tma_amx_busy > 0.5 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "78 * ASSISTS.ANY / tma_info_thread_slots",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
+ "MetricExpr": "63 * ASSISTS.SSE_AVX_MIX / tma_info_thread_slots",
+ "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_avx_assists",
+ "MetricThreshold": "tma_avx_assists > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
+ "MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL1;Default",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)",
+ "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL1;Default",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "DefaultMetricgroupName": "TopdownL2",
+ "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;Default;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2;Default",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
+ "MetricExpr": "CPU_CLK_UNHALTED.C01 / tma_info_thread_clks",
+ "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
+ "MetricName": "tma_c01_wait",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
+ "MetricExpr": "CPU_CLK_UNHALTED.C02 / tma_info_thread_clks",
+ "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
+ "MetricName": "tma_c02_wait",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricExpr": "(76.6 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 74.6 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "DefaultMetricgroupName": "TopdownL2",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "Backend;Compute;Default;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2;Default",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricExpr": "74.6 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIV_ACTIVE / tma_info_thread_clks",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricExpr": "( MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "81 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "DefaultMetricgroupName": "TopdownL2",
+ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)",
+ "MetricGroup": "Default;FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.2",
+ "MetricgroupNoGroup": "TopdownL2;Default",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "DefaultMetricgroupName": "TopdownL2",
+ "MetricExpr": "topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_thread_slots",
+ "MetricGroup": "Default;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricgroupNoGroup": "TopdownL2;Default",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "max(0, tma_heavy_operations - tma_microcode_sequencer)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists",
+ "MetricExpr": "30 * ASSISTS.FP / tma_info_thread_slots",
+ "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_fp_assists",
+ "MetricThreshold": "tma_fp_assists > 0.1",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.VECTOR + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_512b",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_thread_slots",
+ "MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "MetricgroupNoGroup": "TopdownL1;Default",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.MACRO_FUSED / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fused_instructions",
+ "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "DefaultMetricgroupName": "TopdownL2",
+ "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
+ "MetricGroup": "Default;Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "MetricgroupNoGroup": "TopdownL2;Default",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES / 100",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_bad_spec_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_indirect",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_ret",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_ret < 500"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmispredict",
+ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200"
+ },
+ {
+ "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)",
+ "MetricExpr": "INT_MISC.CLEARS_COUNT / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)",
+ "MetricGroup": "BrMispredicts",
+ "MetricName": "tma_info_bad_spec_spec_clears_ratio"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_botlnk_l0_core_bound_likely",
+ "MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_misses",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_botlnk_l2_ic_misses",
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricName": "tma_info_bottleneck_big_code",
+ "MetricThreshold": "tma_info_bottleneck_big_code > 20"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
+ "MetricName": "tma_info_bottleneck_branching_overhead",
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
+ "MetricExpr": "100 * ( ( tma_memory_bound * ( tma_dram_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_mem_bandwidth / ( tma_mem_bandwidth + tma_mem_latency ) ) ) + ( tma_memory_bound * ( tma_l3_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_sq_full / ( tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full ) ) ) + ( tma_memory_bound * ( tma_l1_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_fb_full / ( tma_dtlb_load + tma_store_fwd_blk + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_fb_full ) ) ) )",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
+ "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
+ "MetricExpr": "100 * ( ( tma_memory_bound * ( tma_dram_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_mem_latency / ( tma_mem_bandwidth + tma_mem_latency ) ) ) + ( tma_memory_bound * ( tma_l3_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_l3_hit_latency / ( tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full ) ) ) + ( tma_memory_bound * tma_l2_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) + ( tma_memory_bound * ( tma_store_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_store_latency / ( tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store ) ) ) + ( tma_memory_bound * ( tma_l1_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_l1_hit_latency / ( tma_dtlb_load + tma_store_fwd_blk + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_fb_full ) ) ) )",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_bottleneck_cache_memory_latency",
+ "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * tma_amx_busy / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_info_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))) - tma_info_bottleneck_big_code",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_bottleneck_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of irregular execution (e.g",
+ "MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu@RS.EMPTY\\,umask\\=1@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
+ "MetricName": "tma_info_bottleneck_irregular_overhead",
+ "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
+ "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricExpr": "100 * ( tma_memory_bound * ( tma_l1_bound / max( tma_memory_bound , ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) ) * ( tma_dtlb_load / max( tma_l1_bound , ( tma_dtlb_load + tma_store_fwd_blk + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_fb_full ) ) ) + ( tma_memory_bound * ( tma_store_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_dtlb_store / ( tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store ) ) ) )",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_bottleneck_memory_data_tlbs",
+ "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
+ "MetricExpr": "100 * ( tma_memory_bound * ( ( tma_dram_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_mem_latency / ( tma_mem_bandwidth + tma_mem_latency ) ) * tma_remote_cache / ( tma_local_mem + tma_remote_mem + tma_remote_cache ) + ( tma_l3_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * ( tma_contested_accesses + tma_data_sharing ) / ( tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full ) + ( tma_store_bound / ( tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound ) ) * tma_false_sharing / ( ( tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store ) - tma_store_latency ) ) + tma_machine_clears * ( 1 - tma_other_nukes / ( tma_other_nukes ) ) )",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_bottleneck_memory_synchronization",
+ "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
+ "MetricName": "tma_info_bottleneck_mispredictions",
+ "MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
+ "MetricName": "tma_info_bottleneck_other_bottlenecks",
+ "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_callret"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_branches_cond_nt"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_branches_cond_tk"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_jump"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - (tma_info_branches_cond_nt + tma_info_branches_cond_tk + tma_info_branches_callret + tma_info_branches_jump)",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_other_branches"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else tma_info_thread_clks)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_core_clks"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_core_coreipc"
+ },
+ {
+ "BriefDescription": "uops Executed per Cycle",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / tma_info_thread_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_core_epc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.8_FLOPS) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / tma_info_core_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_core_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "(FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5) / (2 * tma_info_core_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_core_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_core_ilp"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_frontend_dsb_coverage",
+ "MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 6 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_frontend_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_frontend_fetch_upc"
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_DATA.STALLS / cpu@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_frontend_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_frontend_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_frontend_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_inst_mix_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_frontend_ipunknown_branch"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_frontend_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_frontend_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
+ "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / cpu@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_frontend_unknown_branch_cost",
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node."
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_inst_mix_bptkbranch"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_inst_mix_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR + (FP_ARITH_INST_RETIRED.VECTOR + FP_ARITH_INST_RETIRED2.VECTOR))",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_inst_mix_iparith",
+ "MetricThreshold": "tma_info_inst_mix_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_avx128",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_avx256",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_avx512",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED2.SCALAR",
+ "MetricGroup": "Flops;FpScalar;InsType;Server",
+ "MetricName": "tma_info_inst_mix_iparith_scalar_hp",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_hp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_inst_mix_ipbranch",
+ "MetricThreshold": "tma_info_inst_mix_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_inst_mix_ipcall",
+ "MetricThreshold": "tma_info_inst_mix_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.8_FLOPS) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_inst_mix_ipflop",
+ "MetricThreshold": "tma_info_inst_mix_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_inst_mix_ipload",
+ "MetricThreshold": "tma_info_inst_mix_ipload < 3"
+ },
+ {
+ "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_inst_mix_instructions / CPU_CLK_UNHALTED.PAUSE_INST",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_ippause"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_inst_mix_ipstore",
+ "MetricThreshold": "tma_info_inst_mix_ipstore < 8"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_inst_mix_ipswpf",
+ "MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
+ },
+ {
+ "BriefDescription": "Instructions per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 13",
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / tma_info_inst_mix_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_memory_core_l2_evictions_nonsilent_pki"
+ },
+ {
+ "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / tma_info_inst_mix_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_memory_core_l2_evictions_silent_pki"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_fb_hpki"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l1mpki"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l1mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheHits;Mem",
+ "MetricName": "tma_info_memory_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheHits;Mem;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * L2_RQSTS.RFO_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_memory_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem",
+ "MetricName": "tma_info_memory_l3mpki"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_memory_latency_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_memory_latency_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_latency_load_l3_miss_latency"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_memory_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "\"Bus lock\" per kilo instruction",
+ "MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY",
+ "MetricGroup": "Mem",
+ "MetricName": "tma_info_memory_mix_bus_lock_pki"
+ },
+ {
+ "BriefDescription": "Off-core accesses per kilo instruction for modified write requests",
+ "MetricExpr": "1e3 * OCR.MODIFIED_WRITE.ANY_RESPONSE / tma_info_inst_mix_instructions",
+ "MetricGroup": "Offcore",
+ "MetricName": "tma_info_memory_mix_offcore_mwrite_any_pki"
+ },
+ {
+ "BriefDescription": "Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
+ "MetricExpr": "1e3 * OCR.READS_TO_CORE.ANY_RESPONSE / tma_info_inst_mix_instructions",
+ "MetricGroup": "CacheHits;Offcore",
+ "MetricName": "tma_info_memory_mix_offcore_read_any_pki"
+ },
+ {
+ "BriefDescription": "L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
+ "MetricExpr": "1e3 * OCR.READS_TO_CORE.L3_MISS / tma_info_inst_mix_instructions",
+ "MetricGroup": "Offcore",
+ "MetricName": "tma_info_memory_mix_offcore_read_l3m_pki"
+ },
+ {
+ "BriefDescription": "Un-cacheable retired load per kilo instruction",
+ "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
+ "MetricGroup": "Mem",
+ "MetricName": "tma_info_memory_mix_uc_load_pki"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_memory_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket",
+ "MetricExpr": "64 * OCR.READS_TO_CORE.DRAM / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_memory_soc_r2c_dram_bw",
+ "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW."
+ },
+ {
+ "BriefDescription": "Average L3-cache miss BW for Reads-to-Core (R2C)",
+ "MetricExpr": "64 * OCR.READS_TO_CORE.L3_MISS / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_memory_soc_r2c_l3m_bw",
+ "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW."
+ },
+ {
+ "BriefDescription": "Average Off-core access BW for Reads-to-Core (R2C)",
+ "MetricExpr": "64 * OCR.READS_TO_CORE.ANY_RESPONSE / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_memory_soc_r2c_offcore_bw",
+ "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches."
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_code_stlb_mpki"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_load_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (4 * tma_info_core_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_page_walks_utilization",
+ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_store_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_pipeline_execute"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
+ "BriefDescription": "Instructions per a microcode Assist invocation",
+ "MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
+ "MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
+ "MetricName": "tma_info_pipeline_ipassist",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
+ "PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_pipeline_retire"
+ },
+ {
+ "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "MicroSeq;Pipeline;Ret",
+ "MetricName": "tma_info_pipeline_strings_cycles",
+ "MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1"
+ },
+ {
+ "BriefDescription": "Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states",
+ "MetricExpr": "CPU_CLK_UNHALTED.C0_WAIT / tma_info_thread_clks",
+ "MetricGroup": "C0Wait",
+ "MetricName": "tma_info_system_c0_wait",
+ "MetricThreshold": "tma_info_system_c0_wait > 0.05"
+ },
+ {
+ "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_system_core_frequency"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization (percentage)",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_system_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average number of utilized CPUs",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_cpus_utilized"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_system_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.8_FLOPS) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_system_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
+ },
+ {
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;MemOffcore;Server;SoC",
+ "MetricName": "tma_info_system_io_read_bw",
+ "PublicDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU"
+ },
+ {
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR) * 64 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;MemOffcore;Server;SoC",
+ "MetricName": "tma_info_system_io_write_bw",
+ "PublicDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_system_ipfarbranch",
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_system_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_system_kernel_utilization",
+ "MetricThreshold": "tma_info_system_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / uncore_cha_0@event\\=0x1@",
+ "MetricGroup": "MemOffcore;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_system_mem_dram_read_latency",
+ "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_system_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]",
+ "MetricExpr": "(1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM) / uncore_cha_0@event\\=0x1@ if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_system_mem_pmm_read_latency",
+ "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_system_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_system_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_pmm_read_bw"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_pmm_write_bw"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_system_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "uncore_cha_0@event\\=0x1@",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_socket_clks"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]",
+ "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_uncore_frequency"
+ },
+ {
+ "BriefDescription": "Cross-socket Ultra Path Interconnect (UPI) data transmit bandwidth for data only [MB / sec]",
+ "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 64 / 9 / 1e6",
+ "MetricGroup": "Server;SoC",
+ "MetricName": "tma_info_system_upi_data_transmit_bw"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_thread_clks"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_thread_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_thread_cpi"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_thread_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_thread_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_thread_ipc"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_thread_slots"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
+ "MetricGroup": "SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_thread_slots_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_thread_uoppi",
+ "MetricThreshold": "tma_info_thread_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Uops per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_thread_uptb",
+ "MetricThreshold": "tma_info_thread_uptb < 9"
+ },
+ {
+ "BriefDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_int_operations",
+ "MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired",
+ "MetricExpr": "(INT_VEC_RETIRED.ADD_128 + INT_VEC_RETIRED.VNNI_128) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
+ "MetricName": "tma_int_vector_128b",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired",
+ "MetricExpr": "(INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
+ "MetricName": "tma_int_vector_256b",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((EXE_ACTIVITY.BOUND_ON_LOADS - MEMORY_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
+ "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
+ "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "32.6 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "DECODE.LCP / tma_info_thread_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "DefaultMetricgroupName": "TopdownL2",
+ "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
+ "MetricGroup": "Default;Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "MetricgroupNoGroup": "TopdownL2;Default",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_2_3_10 / (3 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3_10",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricExpr": "72 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_local_mem",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "DefaultMetricgroupName": "TopdownL2",
+ "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
+ "MetricGroup": "BadSpec;BvMS;Default;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2;Default",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling).",
+ "MetricExpr": "INT_MISC.MBA_STALLS / tma_info_thread_clks",
+ "MetricGroup": "MemoryBW;Offcore;Server;TopdownL5;tma_L5_group;tma_mem_bandwidth_group",
+ "MetricName": "tma_mba_stalls",
+ "MetricThreshold": "tma_mba_stalls > 0.1 & (tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "DefaultMetricgroupName": "TopdownL2",
+ "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
+ "MetricGroup": "Backend;Default;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2;Default",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
+ "MetricName": "tma_memory_fence",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.MS / tma_info_thread_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
+ "MetricExpr": "160 * ASSISTS.SSE_AVX_MIX / tma_info_thread_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / tma_info_thread_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
+ "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - INST_RETIRED.MACRO_FUSED) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_non_fused_branches",
+ "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
+ "MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricName": "tma_other_mispredicts",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
+ "MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_other_nukes",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults",
+ "MetricExpr": "99 * ASSISTS.PAGE_FAULT / tma_info_thread_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_page_faults",
+ "MetricThreshold": "tma_page_faults > 0.05",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / tma_info_thread_clks if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / tma_info_thread_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + max(cpu@RS.EMPTY\\,umask\\=1@ - RESOURCE_STALLS.SCOREBOARD, 0)) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
+ "MetricExpr": "(133 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 133 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
+ "MetricName": "tma_remote_cache",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricExpr": "153 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_remote_mem",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
+ "MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "MetricgroupNoGroup": "TopdownL1;Default",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks + tma_c02_wait",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer)",
+ "MetricExpr": "tma_light_operations * INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricName": "tma_shuffles_256b",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(XQ.FULL_CYCLES + L1D_PEND_MISS.L2_STALLS) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricExpr": "(MEM_STORE_RETIRED.L2_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_4_9 + UOPS_DISPATCHED.PORT_7_8) / (4 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores",
+ "MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
+ "MetricName": "tma_streaming_stores",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore operating frequency in GHz",
+ "MetricExpr": "UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_CLOCKTICKS) * #num_packages) / 1e9 / duration_time",
+ "MetricName": "uncore_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+ "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+ "MetricName": "upi_data_receive_bw",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
+ "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+ "MetricName": "upi_data_transmit_bw",
+ "ScaleUnit": "1MB/s"
+ }
+]
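For readers skimming the new metrics file: each entry's MetricExpr is a formula that perf's metric engine evaluates against the named event counts (and against other metrics it references), then scales by ScaleUnit for display. As a rough illustration only -- a minimal Python sketch, not perf's actual expression parser, with made-up counter values and with tma_info_thread_clks assumed to reduce to CPU_CLK_UNHALTED.THREAD -- the tma_itlb_misses entry above works out to a simple ratio:

    # Hypothetical event counts; real values come from the perf counters.
    counts = {
        "ICACHE_TAG.STALLS": 1.2e7,
        "CPU_CLK_UNHALTED.THREAD": 4.0e8,
    }

    # tma_info_thread_clks is itself a metric defined elsewhere in this file;
    # assumed here to boil down to CPU_CLK_UNHALTED.THREAD.
    tma_info_thread_clks = counts["CPU_CLK_UNHALTED.THREAD"]

    # "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks"
    tma_itlb_misses = counts["ICACHE_TAG.STALLS"] / tma_info_thread_clks

    # "ScaleUnit": "100%" -> reported as a percentage of cycles.
    print(f"tma_itlb_misses: {100 * tma_itlb_misses:.1f}%")  # prints 3.0%

The MetricThreshold strings follow the same convention: the metric is only highlighted when its value and its parents' values exceed the listed cut-offs.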
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
index 1bdefaf96287..bc475e163227 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ARITH.FPDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.FPDIV_ACTIVE",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.FP",
"PublicDescription": "Counts all microcode Floating Point assists.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.SSE_AVX_MIX",
"SampleAfterValue": "1000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"SampleAfterValue": "2000003",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"SampleAfterValue": "2000003",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"SampleAfterValue": "2000003",
@@ -45,6 +51,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V0",
"SampleAfterValue": "2000003",
@@ -52,6 +59,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V1",
"SampleAfterValue": "2000003",
@@ -59,6 +67,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V2",
"SampleAfterValue": "2000003",
@@ -66,6 +75,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -82,6 +93,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -90,6 +102,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -106,6 +120,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -114,6 +129,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -122,6 +138,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -130,6 +147,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -138,6 +156,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -146,6 +165,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -154,6 +174,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -162,6 +183,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
"SampleAfterValue": "100003",
@@ -169,6 +191,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
"SampleAfterValue": "100003",
@@ -176,6 +199,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
"SampleAfterValue": "100003",
@@ -183,6 +207,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
"SampleAfterValue": "100003",
@@ -190,6 +215,7 @@
},
{
"BriefDescription": "Number of all Scalar Half-Precision FP arithmetic instructions(1) retired - regular and complex.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.SCALAR",
"PublicDescription": "FP_ARITH_INST_RETIRED2.SCALAR",
@@ -198,6 +224,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
"SampleAfterValue": "100003",
@@ -205,6 +232,7 @@
},
{
"BriefDescription": "Number of all Vector (also called packed) Half-Precision FP arithmetic instructions(1) retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.VECTOR",
"PublicDescription": "FP_ARITH_INST_RETIRED2.VECTOR",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
index 93d99318a623..f6e3e40a3b20 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Clears due to Unknown Branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.MS_BUSY",
"SampleAfterValue": "500009",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -54,6 +60,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -65,6 +72,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -76,6 +84,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -87,6 +96,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -98,6 +108,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -109,6 +120,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -120,6 +132,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -131,6 +144,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -142,6 +156,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -153,6 +168,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -164,6 +180,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -175,6 +192,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -186,6 +204,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -197,6 +216,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -208,6 +228,7 @@
},
{
"BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
@@ -218,6 +239,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -229,6 +251,7 @@
},
{
"BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
@@ -239,6 +262,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_DATA.STALLS",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
@@ -246,7 +270,18 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "ICACHE_DATA.STALL_PERIODS",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALL_PERIODS",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
@@ -255,6 +290,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -264,6 +300,7 @@
},
{
"BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
@@ -273,6 +310,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
@@ -281,6 +319,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_ANY",
@@ -290,6 +329,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_OK",
@@ -299,6 +339,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -307,6 +348,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES_ANY",
@@ -316,6 +358,7 @@
},
{
"BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -326,6 +369,7 @@
},
{
"BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS).",
@@ -334,6 +378,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
@@ -342,6 +387,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
@@ -351,6 +397,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
@@ -361,6 +408,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CORE]",
@@ -369,6 +417,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -378,6 +427,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json
index 5420f529f491..2ea19539291b 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "9",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
@@ -50,7 +56,21 @@
"UMask": "0x9"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "53",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -63,6 +83,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -75,6 +96,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -87,6 +109,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -99,6 +122,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -111,6 +135,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -123,6 +148,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -135,6 +161,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -147,6 +174,7 @@
},
{
"BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Counter": "0",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
@@ -157,6 +185,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -166,6 +195,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -175,6 +205,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -184,6 +215,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -193,6 +225,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -202,6 +235,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -211,6 +245,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -220,6 +255,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
"MSRIndex": "0x1a6,0x1a7",
@@ -229,6 +265,7 @@
},
{
"BriefDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -238,6 +275,7 @@
},
{
"BriefDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -247,6 +285,7 @@
},
{
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -254,6 +293,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
@@ -262,6 +302,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "1",
@@ -271,6 +312,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -279,6 +321,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -287,6 +330,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
@@ -295,6 +339,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -303,6 +348,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Counts the number of times RTM commit succeeded.",
@@ -311,6 +357,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
@@ -319,6 +366,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_READ",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
@@ -327,6 +375,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
@@ -335,6 +384,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/metricgroups.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/metricgroups.json
new file mode 100644
index 000000000000..e1de6c2675c4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/metricgroups.json
@@ -0,0 +1,137 @@
+{
+ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "C0Wait": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DSBmiss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DataSharing": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Fed": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FetchBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FetchLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Flops": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FpScalar": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FpVector": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Frontend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "HPC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "IcMiss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "InsType": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "IntVector": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "IoBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryTLB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Memory_BW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Memory_Lat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MicroSeq": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "OS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Offcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "PGO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Pipeline": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "PortsUtil": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Prefetches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Ret": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Retire": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "SMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Server": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Snoop": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "SoC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Summary": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL1": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL2": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL3mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TopdownL1": "Metrics for top-down breakdown at level 1",
+ "TopdownL2": "Metrics for top-down breakdown at level 2",
+ "TopdownL3": "Metrics for top-down breakdown at level 3",
+ "TopdownL4": "Metrics for top-down breakdown at level 4",
+ "TopdownL5": "Metrics for top-down breakdown at level 5",
+ "TopdownL6": "Metrics for top-down breakdown at level 6",
+ "tma_L1_group": "Metrics for top-down breakdown at level 1",
+ "tma_L2_group": "Metrics for top-down breakdown at level 2",
+ "tma_L3_group": "Metrics for top-down breakdown at level 3",
+ "tma_L4_group": "Metrics for top-down breakdown at level 4",
+ "tma_L5_group": "Metrics for top-down breakdown at level 5",
+ "tma_L6_group": "Metrics for top-down breakdown at level 6",
+ "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category",
+ "tma_assists_group": "Metrics contributing to tma_assists category",
+ "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category",
+ "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
+ "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category",
+ "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category",
+ "tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category",
+ "tma_dtlb_load_group": "Metrics contributing to tma_dtlb_load category",
+ "tma_dtlb_store_group": "Metrics contributing to tma_dtlb_store category",
+ "tma_fetch_bandwidth_group": "Metrics contributing to tma_fetch_bandwidth category",
+ "tma_fetch_latency_group": "Metrics contributing to tma_fetch_latency category",
+ "tma_fp_arith_group": "Metrics contributing to tma_fp_arith category",
+ "tma_fp_vector_group": "Metrics contributing to tma_fp_vector category",
+ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category",
+ "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category",
+ "tma_int_operations_group": "Metrics contributing to tma_int_operations category",
+ "tma_issue2P": "Metrics related by the issue $issue2P",
+ "tma_issueBM": "Metrics related by the issue $issueBM",
+ "tma_issueBW": "Metrics related by the issue $issueBW",
+ "tma_issueComp": "Metrics related by the issue $issueComp",
+ "tma_issueD0": "Metrics related by the issue $issueD0",
+ "tma_issueFB": "Metrics related by the issue $issueFB",
+ "tma_issueFL": "Metrics related by the issue $issueFL",
+ "tma_issueL1": "Metrics related by the issue $issueL1",
+ "tma_issueLat": "Metrics related by the issue $issueLat",
+ "tma_issueMC": "Metrics related by the issue $issueMC",
+ "tma_issueMS": "Metrics related by the issue $issueMS",
+ "tma_issueMV": "Metrics related by the issue $issueMV",
+ "tma_issueRFO": "Metrics related by the issue $issueRFO",
+ "tma_issueSL": "Metrics related by the issue $issueSL",
+ "tma_issueSO": "Metrics related by the issue $issueSO",
+ "tma_issueSmSt": "Metrics related by the issue $issueSmSt",
+ "tma_issueSpSt": "Metrics related by the issue $issueSpSt",
+ "tma_issueSyncxn": "Metrics related by the issue $issueSyncxn",
+ "tma_issueTLB": "Metrics related by the issue $issueTLB",
+ "tma_l1_bound_group": "Metrics contributing to tma_l1_bound category",
+ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category",
+ "tma_light_operations_group": "Metrics contributing to tma_light_operations category",
+ "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category",
+ "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
+ "tma_mem_bandwidth_group": "Metrics contributing to tma_mem_bandwidth category",
+ "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category",
+ "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category",
+ "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category",
+ "tma_mite_group": "Metrics contributing to tma_mite category",
+ "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category",
+ "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category",
+ "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category",
+ "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category",
+ "tma_retiring_group": "Metrics contributing to tma_retiring category",
+ "tma_serializing_operation_group": "Metrics contributing to tma_serializing_operation category",
+ "tma_store_bound_group": "Metrics contributing to tma_store_bound category",
+ "tma_store_op_utilization_group": "Metrics contributing to tma_store_op_utilization category"
+}
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json
index 2f375a6badcd..c424facf1b95 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ASSISTS.PAGE_FAULT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.PAGE_FAULT",
"SampleAfterValue": "1000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb7",
"EventName": "EXE.AMX_BUSY",
"SampleAfterValue": "2000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -69,6 +77,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -78,6 +87,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -96,6 +107,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -105,6 +117,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -114,6 +127,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -123,6 +137,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -132,6 +147,7 @@
},
{
"BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -141,6 +157,7 @@
},
{
"BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L2.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -150,6 +167,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -159,6 +177,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.REMOTE",
"MSRIndex": "0x1a6,0x1a7",
@@ -168,6 +187,7 @@
},
{
"BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -186,6 +207,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -195,6 +217,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -204,6 +227,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -213,6 +237,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE",
"MSRIndex": "0x1a6,0x1a7",
@@ -222,6 +247,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -231,6 +257,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
"MSRIndex": "0x1a6,0x1a7",
@@ -240,6 +267,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -249,6 +277,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -258,6 +287,7 @@
},
{
"BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.WRITE_ESTIMATE.MEMORY",
"MSRIndex": "0x1a6,0x1a7",
@@ -267,6 +297,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa5",
"EventName": "RS.EMPTY",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
@@ -275,6 +306,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xa5",
@@ -285,7 +317,16 @@
"UMask": "0x7"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EdgeDetect": "1",
@@ -297,6 +338,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xa5",
"EventName": "RS_EMPTY.CYCLES",
@@ -305,6 +347,7 @@
},
{
"BriefDescription": "Cycles the uncore cannot take further requests",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x2d",
"EventName": "XQ.FULL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
index e2086bedeca8..5d5811f26151 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.DIV_ACTIVE",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.FPDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.IDIV_ACTIVE",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.IDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.ANY",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -61,6 +68,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -70,6 +78,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -79,6 +88,7 @@
},
{
"BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -88,6 +98,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -97,6 +108,7 @@
},
{
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -115,6 +128,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -141,6 +157,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -150,6 +167,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -159,6 +177,7 @@
},
{
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -168,6 +187,7 @@
},
{
"BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "Mispredicted indirect CALL retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -186,6 +207,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -195,6 +217,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -204,6 +227,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C01",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
@@ -212,6 +236,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C02",
"PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
@@ -220,6 +245,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C0_WAIT",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
@@ -228,6 +254,7 @@
},
{
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -236,6 +263,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
@@ -244,6 +272,7 @@
},
{
"BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.PAUSE",
"SampleAfterValue": "2000003",
@@ -251,6 +280,7 @@
},
{
"BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xec",
@@ -260,6 +290,7 @@
},
{
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -268,6 +299,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -275,6 +307,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
@@ -283,6 +316,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -290,6 +324,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -297,6 +332,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -305,6 +341,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -313,6 +350,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "16",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -321,6 +359,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -329,6 +368,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -337,6 +377,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -345,6 +386,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -352,7 +394,16 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "Cycles total of 2 or 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_3_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xc"
+ },
+ {
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -361,6 +412,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -369,6 +421,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -377,6 +430,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
@@ -385,6 +439,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
@@ -394,6 +449,7 @@
},
{
"BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
@@ -402,6 +458,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -410,6 +467,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -418,6 +476,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -426,6 +485,7 @@
},
{
"BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -434,6 +494,7 @@
},
{
"BriefDescription": "Retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBS": "1",
@@ -443,6 +504,7 @@
},
{
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
@@ -451,6 +513,7 @@
},
{
"BriefDescription": "Iterations of Repeat string retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.REP_ITERATION",
"PEBS": "1",
@@ -460,6 +523,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xad",
@@ -470,6 +534,7 @@
},
{
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
@@ -478,6 +543,7 @@
},
{
"BriefDescription": "INT_MISC.MBA_STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.MBA_STALLS",
"SampleAfterValue": "1000003",
@@ -485,6 +551,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -493,6 +560,7 @@
},
{
"BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"MSRIndex": "0x3F7",
@@ -502,6 +570,7 @@
},
{
"BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.UOP_DROPPING",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
@@ -510,6 +579,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.128BIT",
"SampleAfterValue": "1000003",
@@ -517,6 +587,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.256BIT",
"SampleAfterValue": "1000003",
@@ -524,6 +595,7 @@
},
{
"BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.ADD_128",
"PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
@@ -532,6 +604,7 @@
},
{
"BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.ADD_256",
"PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
@@ -540,6 +613,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.MUL_256",
"SampleAfterValue": "1000003",
@@ -547,6 +621,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.SHUFFLES",
"SampleAfterValue": "1000003",
@@ -554,6 +629,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.VNNI_128",
"SampleAfterValue": "1000003",
@@ -561,6 +637,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.VNNI_256",
"SampleAfterValue": "1000003",
@@ -568,6 +645,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
@@ -576,6 +654,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -584,6 +663,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -592,6 +672,7 @@
},
{
"BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -600,6 +681,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -609,6 +691,7 @@
},
{
"BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
@@ -618,6 +701,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -626,6 +710,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -636,6 +721,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -644,6 +730,7 @@
},
{
"BriefDescription": "LFENCE instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe0",
"EventName": "MISC2_RETIRED.LFENCE",
"PublicDescription": "number of LFENCE retired instructions",
@@ -652,6 +739,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
@@ -660,6 +748,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -668,6 +757,7 @@
},
{
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"SampleAfterValue": "100003",
@@ -675,6 +765,7 @@
},
{
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "Number of slots in TMA method where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
@@ -683,6 +774,7 @@
},
{
"BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "Counter": "0",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BAD_SPEC_SLOTS",
"PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
@@ -691,6 +783,7 @@
},
{
"BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "Counter": "0",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
"PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
@@ -699,6 +792,7 @@
},
{
"BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
"SampleAfterValue": "10000003",
@@ -706,6 +800,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -713,6 +808,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -721,6 +817,7 @@
},
{
"BriefDescription": "UOPS_DECODED.DEC0_UOPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UOPS_DECODED.DEC0_UOPS",
"SampleAfterValue": "1000003",
@@ -728,6 +825,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_0",
"PublicDescription": "Number of uops dispatch to execution port 0.",
@@ -736,6 +834,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_1",
"PublicDescription": "Number of uops dispatch to execution port 1.",
@@ -744,6 +843,7 @@
},
{
"BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_2_3_10",
"PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
@@ -752,6 +852,7 @@
},
{
"BriefDescription": "Uops executed on ports 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
@@ -760,6 +861,7 @@
},
{
"BriefDescription": "Uops executed on ports 5 and 11",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_5_11",
"PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
@@ -768,6 +870,7 @@
},
{
"BriefDescription": "Uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_6",
"PublicDescription": "Number of uops dispatch to execution port 6.",
@@ -776,6 +879,7 @@
},
{
"BriefDescription": "Uops executed on ports 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
@@ -784,6 +888,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Counts the number of uops executed from any thread.",
@@ -792,6 +897,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -801,6 +907,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -810,6 +917,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -819,6 +927,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -828,6 +937,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
@@ -837,6 +947,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
@@ -846,6 +957,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
@@ -855,6 +967,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
@@ -864,6 +977,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.STALLS",
@@ -874,6 +988,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UOPS_EXECUTED.STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb1",
@@ -884,6 +999,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
@@ -891,6 +1007,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -899,6 +1016,7 @@
},
{
"BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xae",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -906,7 +1024,17 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "UOPS_ISSUED.CYCLES",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles with retired uop(s).",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.CYCLES",
@@ -916,6 +1044,7 @@
},
{
"BriefDescription": "Retired uops except the last uop of each instruction.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.HEAVY",
"PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
@@ -924,6 +1053,7 @@
},
{
"BriefDescription": "UOPS_RETIRED.MS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"MSRIndex": "0x3F7",
@@ -933,6 +1063,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "Counts the retirement slots used each cycle.",
@@ -941,6 +1072,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.STALLS",
@@ -951,6 +1083,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UOPS_RETIRED.STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xc2",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
index 141dab46682e..f453202d80c2 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Intermediate bypass Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the intermediate bypass.",
"UMask": "0x2",
@@ -10,8 +12,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Not Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
"UMask": "0x4",
@@ -19,8 +23,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the full bypass.",
"UMask": "0x1",
@@ -28,6 +34,7 @@
},
{
"BriefDescription": "CHA Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_CHA_CLOCKTICKS",
"PerPkg": "1",
@@ -36,6 +43,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_CHA_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -43,8 +51,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xf2",
@@ -52,8 +62,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Any Single Snoop : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xf1",
@@ -61,8 +73,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x42",
@@ -70,8 +84,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x41",
@@ -79,8 +95,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x82",
@@ -88,8 +106,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x81",
@@ -97,8 +117,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x22",
@@ -106,8 +128,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x21",
@@ -115,8 +139,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.REMOTE_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x12",
@@ -124,8 +150,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.REMOTE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x11",
@@ -133,96 +161,120 @@
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6e",
"EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6e",
"EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6e",
"EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed.",
"UMask": "0x2",
@@ -230,8 +282,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed.",
"UMask": "0x1",
@@ -239,6 +293,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.HA",
"PerPkg": "1",
@@ -248,6 +303,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.TOR",
"PerPkg": "1",
@@ -257,8 +313,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -266,8 +324,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -275,8 +335,10 @@
},
{
"BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*).",
"UMask": "0x1",
@@ -284,80 +346,100 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : op is WbMtoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed : op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed : op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : No SF/LLC HitS/F and op is RdInvOwn",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : SF/LLC HitS/F and op is RdInvOwn",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Deallocate HitME$ on Reads without RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request : Received RspFwdI* for a local request, but converted HitME$ to SF entry",
"UMask": "0x1",
@@ -365,16 +447,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request : Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
"UMask": "0x2",
@@ -382,14 +468,17 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache to SHARed",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
"PerPkg": "1",
@@ -399,8 +488,10 @@
},
{
"BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "HA to iMC Reads Issued : ISOCH : Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
"UMask": "0x2",
@@ -408,6 +499,7 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
"PerPkg": "1",
@@ -417,8 +509,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x4",
@@ -426,8 +520,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x2",
@@ -435,8 +531,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x8",
@@ -444,8 +542,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x1fffff",
@@ -453,8 +553,10 @@
},
{
"BriefDescription": "Cache Lookups : All transactions from Remote Agents",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All transactions from Remote Agents : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x17e0ff",
@@ -462,16 +564,20 @@
},
{
"BriefDescription": "Cache Lookups : All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local or remote transaction to the LLC, including prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : CRd Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1bd0ff",
@@ -479,24 +585,30 @@
},
{
"BriefDescription": "Cache Lookups : CRd Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Local non-prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Local non-prefetch requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local transaction to the LLC, not including prefetch",
"Unit": "CHA"
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1bc1ff",
@@ -504,8 +616,10 @@
},
{
"BriefDescription": "Cache Lookups : Data Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1fc1ff",
@@ -513,16 +627,20 @@
},
{
"BriefDescription": "Cache Lookups : Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Request : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Read transactions.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x841ff",
@@ -530,8 +648,10 @@
},
{
"BriefDescription": "Cache Lookups : Data Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1fc101",
@@ -539,8 +659,10 @@
},
{
"BriefDescription": "Cache Lookups : E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Exclusive State",
"UMask": "0x20",
@@ -548,8 +670,10 @@
},
{
"BriefDescription": "Cache Lookups : F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : F State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Forward State",
"UMask": "0x80",
@@ -557,8 +681,10 @@
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1a44ff",
@@ -566,16 +692,20 @@
},
{
"BriefDescription": "Cache Lookups : Flush",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : I State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Miss",
"UMask": "0x1",
@@ -583,16 +713,20 @@
},
{
"BriefDescription": "Cache Lookups : Local LLC prefetch requests (from LLC)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local LLC prefetch to the LLC",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Transactions homed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
"UMask": "0xbdfff",
@@ -600,8 +734,10 @@
},
{
"BriefDescription": "Cache Lookups : CRd Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x19d0ff",
@@ -609,8 +745,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x19c1ff",
@@ -618,8 +756,10 @@
},
{
"BriefDescription": "Cache Lookups : Demand CRd Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1850ff",
@@ -627,8 +767,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Demand Data Reads that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1841ff",
@@ -636,8 +778,10 @@
},
{
"BriefDescription": "Cache Lookups : Demand RFO Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1848ff",
@@ -645,16 +789,20 @@
},
{
"BriefDescription": "Cache Lookups : Transactions homed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_FLUSH_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1844ff",
@@ -662,8 +810,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Prefetch requests to the LLC that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x189dff",
@@ -671,8 +821,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Prefetches that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x199dff",
@@ -680,8 +832,10 @@
},
{
"BriefDescription": "Cache Lookups : CRd Prefetches that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1910ff",
@@ -689,8 +843,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Prefetches that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1981ff",
@@ -698,8 +854,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Prefetches that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1908ff",
@@ -707,8 +865,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x19c8ff",
@@ -716,8 +876,10 @@
},
{
"BriefDescription": "Cache Lookups : M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : M State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Modified State",
"UMask": "0x40",
@@ -725,8 +887,10 @@
},
{
"BriefDescription": "Cache Lookups : All Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1fe001",
@@ -734,24 +898,30 @@
},
{
"BriefDescription": "Cache Lookups : Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Remote non-snoop requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.PREF_OR_DMND_REMOTE_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remote non-snoop requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Remote non-snoop transactions to the LLC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
"UMask": "0x15dfff",
@@ -759,8 +929,10 @@
},
{
"BriefDescription": "Cache Lookups : CRd Requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1a10ff",
@@ -768,8 +940,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Requests that come from a Remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1a01ff",
@@ -777,16 +951,20 @@
},
{
"BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_FLUSH_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1a04ff",
@@ -794,8 +972,10 @@
},
{
"BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache that come from a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_OTHER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
"UMask": "0x1a02ff",
@@ -803,8 +983,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1a08ff",
@@ -812,16 +994,20 @@
},
{
"BriefDescription": "Cache Lookups : Remote snoop requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remote snoop requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Remote snoop transactions to the LLC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Snoop Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x1c19ff",
@@ -829,8 +1015,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1bc8ff",
@@ -838,16 +1026,20 @@
},
{
"BriefDescription": "Cache Lookups : RFO Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x9c8ff",
@@ -855,8 +1047,10 @@
},
{
"BriefDescription": "Cache Lookups : S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Shared State",
"UMask": "0x10",
@@ -864,8 +1058,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit Exclusive State",
"UMask": "0x4",
@@ -873,8 +1069,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - H State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit HitMe State",
"UMask": "0x8",
@@ -882,8 +1080,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit Shared State",
"UMask": "0x2",
@@ -891,8 +1091,10 @@
},
{
"BriefDescription": "Cache Lookups : Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Requests that install or change a line in the LLC. Examples: Writebacks from Core L2's and UPI. Prefetches into the LLC.",
"UMask": "0x842ff",
@@ -900,8 +1102,10 @@
},
{
"BriefDescription": "Cache Lookups : Remote Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x17c2ff",
@@ -909,8 +1113,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in E state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2",
@@ -918,8 +1124,10 @@
},
{
"BriefDescription": "Lines Victimized : IA traffic",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x20",
@@ -927,8 +1135,10 @@
},
{
"BriefDescription": "Lines Victimized : IO traffic",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x10",
@@ -936,8 +1146,10 @@
},
{
"BriefDescription": "All LLC lines in E state that are victimized on a fill from an IO device",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x12",
@@ -945,8 +1157,10 @@
},
{
"BriefDescription": "All LLC lines in F or S state that are victimized on a fill from an IO device",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO_FS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1c",
@@ -954,8 +1168,10 @@
},
{
"BriefDescription": "All LLC lines in M state that are victimized on a fill from an IO device",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x11",
@@ -963,8 +1179,10 @@
},
{
"BriefDescription": "All LLC lines in any state that are victimized on a fill from an IO device",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO_MESF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1f",
@@ -972,8 +1190,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x200f",
@@ -981,8 +1201,10 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2002",
@@ -990,8 +1212,10 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2001",
@@ -999,16 +1223,20 @@
},
{
"BriefDescription": "Lines Victimized : Local Only",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2004",
@@ -1016,8 +1244,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in M state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
@@ -1025,8 +1255,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x800f",
@@ -1034,8 +1266,10 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8002",
@@ -1043,8 +1277,10 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8001",
@@ -1052,16 +1288,20 @@
},
{
"BriefDescription": "Lines Victimized : Remote Only",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8004",
@@ -1069,8 +1309,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x4",
@@ -1078,8 +1320,10 @@
},
{
"BriefDescription": "All LLC lines in E state that are victimized on a fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2",
@@ -1087,8 +1331,10 @@
},
{
"BriefDescription": "All LLC lines in M state that are victimized on a fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
@@ -1096,8 +1342,10 @@
},
{
"BriefDescription": "All LLC lines in S state that are victimized on a fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x4",
@@ -1105,8 +1353,10 @@
},
{
"BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : CV0 Prefetch Miss : Miscellaneous events in the Cbo.",
"UMask": "0x20",
@@ -1114,8 +1364,10 @@
},
{
"BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : CV0 Prefetch Victim : Miscellaneous events in the Cbo.",
"UMask": "0x10",
@@ -1123,8 +1375,10 @@
},
{
"BriefDescription": "Number of times that an RFO hit in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
"UMask": "0x8",
@@ -1132,8 +1386,10 @@
},
{
"BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : Silent Snoop Eviction : Miscellaneous events in the Cbo. : Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
"UMask": "0x1",
@@ -1141,8 +1397,10 @@
},
{
"BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : Write Combining Aliasing : Miscellaneous events in the Cbo. : Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
"UMask": "0x2",
@@ -1150,8 +1408,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Local InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x1",
@@ -1159,8 +1419,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Local Rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x2",
@@ -1168,8 +1430,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Off",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x20",
@@ -1177,8 +1441,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Remote Rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x4",
@@ -1186,8 +1452,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Remote Rd InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.REMOTE_READINVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Remote Rd InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x8",
@@ -1195,8 +1463,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x10",
@@ -1204,32 +1474,40 @@
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near Memory set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Near Memory evictions due to another read to the same Near Memory set in the LLC.",
"UMask": "0x2",
@@ -1237,8 +1515,10 @@
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near memory set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Near Memory evictions due to another read to the same Near Memory set in the SF",
"UMask": "0x1",
@@ -1246,8 +1526,10 @@
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near Memory set conflict in TOR",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Reject in the CHA due to a pending read to the same Near Memory set in the TOR.",
"UMask": "0x4",
@@ -1255,88 +1537,110 @@
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": count # of FAST TOR Request inserted to ha_tor_req_fifo",
"UMask": "0x2",
@@ -1344,16 +1648,20 @@
},
{
"BriefDescription": "Number of SLOW TOR Request inserted to ha_pmm_tor_req_fifo",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC0 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -1361,8 +1669,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC1 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -1370,8 +1680,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC2 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -1379,8 +1691,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC3 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -1388,8 +1702,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC4 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -1397,8 +1713,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC5 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -1406,8 +1724,10 @@
},
{
"BriefDescription": "Requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
"UMask": "0x30",
@@ -1415,6 +1735,7 @@
},
{
"BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -1424,6 +1745,7 @@
},
{
"BriefDescription": "Remote requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -1433,6 +1755,7 @@
},
{
"BriefDescription": "Read requests made into the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS",
"PerPkg": "1",
@@ -1442,6 +1765,7 @@
},
{
"BriefDescription": "Read requests from a unit on this socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -1451,6 +1775,7 @@
},
{
"BriefDescription": "Read requests from a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -1460,6 +1785,7 @@
},
{
"BriefDescription": "Write requests made into the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES",
"PerPkg": "1",
@@ -1469,6 +1795,7 @@
},
{
"BriefDescription": "Write Requests from a unit on this socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -1478,6 +1805,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -1487,8 +1815,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IPQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x4",
@@ -1496,8 +1826,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x1",
@@ -1505,8 +1837,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IRQ Rejected : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x2",
@@ -1514,8 +1848,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x10",
@@ -1523,8 +1859,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x20",
@@ -1532,8 +1870,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : RRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : RRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x40",
@@ -1541,8 +1881,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : WBQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : WBQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x80",
@@ -1550,8 +1892,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -1559,8 +1903,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -1568,8 +1914,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -1577,8 +1925,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -1586,8 +1936,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -1595,8 +1947,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -1604,8 +1958,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -1613,8 +1969,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -1622,16 +1980,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IPQ0 Reject counter was true",
"UMask": "0x1",
@@ -1639,16 +2001,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -1656,16 +2022,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -1673,8 +2043,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -1682,16 +2054,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -1699,8 +2075,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -1708,8 +2086,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -1717,8 +2097,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -1726,8 +2108,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -1735,8 +2119,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -1744,8 +2130,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -1753,8 +2141,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -1762,16 +2152,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IRQ0 Reject counter was true",
"UMask": "0x1",
@@ -1779,16 +2173,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -1796,24 +2194,30 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -1821,16 +2225,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -1838,8 +2246,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -1847,8 +2257,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
"UMask": "0x40",
@@ -1856,8 +2268,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -1865,8 +2279,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -1874,8 +2290,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -1883,8 +2301,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -1892,8 +2312,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
"UMask": "0x80",
@@ -1901,8 +2323,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -1910,8 +2334,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -1919,8 +2345,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
"UMask": "0x40",
@@ -1928,8 +2356,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -1937,8 +2367,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -1946,8 +2378,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -1955,8 +2389,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -1964,8 +2400,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
"UMask": "0x80",
@@ -1973,8 +2411,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
"UMask": "0x1",
@@ -1982,8 +2422,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -1991,8 +2433,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
"UMask": "0x1",
@@ -2000,8 +2444,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2009,8 +2455,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : IPQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x4",
@@ -2018,8 +2466,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : RRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : RRQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x40",
@@ -2027,8 +2477,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : WBQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : WBQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x80",
@@ -2036,8 +2488,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : AD REQ on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2045,8 +2499,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : AD RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2054,8 +2510,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : Non UPI AK Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject AK ring message",
"UMask": "0x40",
@@ -2063,8 +2521,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL NCB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2072,8 +2532,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL NCS on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2081,8 +2543,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2090,8 +2554,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL WB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2099,8 +2565,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : Non UPI IV Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject IV ring message",
"UMask": "0x80",
@@ -2108,8 +2576,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : Allow Snoop : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x40",
@@ -2117,8 +2587,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : ANY0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Any condition listed in the Other0 Reject counter was true",
"UMask": "0x1",
@@ -2126,8 +2598,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : HA : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x2",
@@ -2135,8 +2609,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : LLC OR SF Way : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2144,8 +2620,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : LLC Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x4",
@@ -2153,8 +2631,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : PhyAddr Match : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2162,8 +2642,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : SF Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2171,8 +2653,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x10",
@@ -2180,8 +2664,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2189,8 +2675,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2198,8 +2686,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -2207,8 +2697,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2216,8 +2708,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2225,8 +2719,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2234,8 +2730,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2243,8 +2741,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -2252,16 +2752,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the PRQ0 Reject counter was true",
"UMask": "0x1",
@@ -2269,16 +2773,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2286,16 +2794,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2303,8 +2815,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2312,16 +2826,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : AD REQ on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2329,8 +2847,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : AD RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2338,8 +2858,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : Non UPI AK Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject AK ring message",
"UMask": "0x40",
@@ -2347,8 +2869,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL NCB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2356,8 +2880,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL NCS on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2365,8 +2891,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2374,8 +2902,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL WB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2383,8 +2913,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : Non UPI IV Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject IV ring message",
"UMask": "0x80",
@@ -2392,8 +2924,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : Allow Snoop : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x40",
@@ -2401,8 +2935,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : ANY0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Any condition listed in the WBQ0 Reject counter was true",
"UMask": "0x1",
@@ -2410,8 +2946,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : HA : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x2",
@@ -2419,8 +2957,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : LLC OR SF Way : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2428,8 +2968,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : LLC Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x4",
@@ -2437,8 +2979,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : PhyAddr Match : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2446,8 +2990,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : SF Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2455,8 +3001,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x10",
@@ -2464,8 +3012,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2473,8 +3023,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2482,8 +3034,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject AK ring message",
"UMask": "0x40",
@@ -2491,8 +3045,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2500,8 +3056,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2509,8 +3067,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2518,8 +3078,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2527,8 +3089,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject IV ring message",
"UMask": "0x80",
@@ -2536,8 +3100,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x40",
@@ -2545,8 +3111,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Any condition listed in the RRQ0 Reject counter was true",
"UMask": "0x1",
@@ -2554,8 +3122,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : HA : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x2",
@@ -2563,8 +3133,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2572,8 +3144,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x4",
@@ -2581,8 +3155,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2590,8 +3166,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2599,8 +3177,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x10",
@@ -2608,8 +3188,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2617,8 +3199,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2626,8 +3210,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject AK ring message",
"UMask": "0x40",
@@ -2635,8 +3221,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2644,8 +3232,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2653,8 +3243,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2662,8 +3254,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2671,8 +3265,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject IV ring message",
"UMask": "0x80",
@@ -2680,8 +3276,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x40",
@@ -2689,8 +3287,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Any condition listed in the WBQ0 Reject counter was true",
"UMask": "0x1",
@@ -2698,8 +3298,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : HA : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x2",
@@ -2707,8 +3309,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2716,8 +3320,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x4",
@@ -2725,8 +3331,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2734,8 +3342,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2743,8 +3353,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x10",
@@ -2752,8 +3364,10 @@
},
{
"BriefDescription": "Snoops Sent : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : All : Counts the number of snoops issued by the HA.",
"UMask": "0x1",
@@ -2761,8 +3375,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast snoop for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast snoop for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from local sockets.",
"UMask": "0x10",
@@ -2770,8 +3386,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA.This filter includes only requests coming from remote sockets.",
"UMask": "0x20",
@@ -2779,8 +3397,10 @@
},
{
"BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Directed snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA. This filter includes only requests coming from local sockets.",
"UMask": "0x40",
@@ -2788,8 +3408,10 @@
},
{
"BriefDescription": "Snoops Sent : Directed snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Directed snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA. This filter includes only requests coming from remote sockets.",
"UMask": "0x80",
@@ -2797,8 +3419,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast or directed Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast or directed Snoops sent for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the local socket.",
"UMask": "0x4",
@@ -2806,8 +3430,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast or directed Snoops sent for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast or directed Snoops sent for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the remote socket.",
"UMask": "0x8",
@@ -2815,8 +3441,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RSPCNFLCT*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : RSPCNFLCT* : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -2824,8 +3452,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : RspFwd : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -2833,8 +3463,10 @@
},
{
"BriefDescription": "Snoop Responses Received : Rsp*Fwd*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPFWDWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : Rsp*Fwd*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -2842,8 +3474,10 @@
},
{
"BriefDescription": "RspI Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
"UMask": "0x1",
@@ -2851,8 +3485,10 @@
},
{
"BriefDescription": "RspIFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
"UMask": "0x4",
@@ -2860,8 +3496,10 @@
},
{
"BriefDescription": "RspS Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a transaction with the opcode type RspS Snoop Response was received which indicates when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -2869,8 +3507,10 @@
},
{
"BriefDescription": "RspSFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
"UMask": "0x8",
@@ -2878,8 +3518,10 @@
},
{
"BriefDescription": "Snoop Responses Received : Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : Rsp*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -2887,8 +3529,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspCnflct : Number of snoop responses received for a Local request : Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -2896,8 +3540,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -2905,8 +3551,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : Rsp*FWD*WB : Number of snoop responses received for a Local request : Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -2914,8 +3562,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspI : Number of snoop responses received for a Local request : Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
"UMask": "0x1",
@@ -2923,8 +3573,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspIFwd : Number of snoop responses received for a Local request : Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
@@ -2932,8 +3584,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspS : Number of snoop responses received for a Local request : Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -2941,8 +3595,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspSFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
"UMask": "0x8",
@@ -2950,8 +3606,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : Rsp*WB : Number of snoop responses received for a Local request : Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -2959,56 +3617,70 @@
},
{
"BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0xc001ffff",
@@ -3016,16 +3688,20 @@
},
{
"BriefDescription": "TOR Inserts : DDR Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DDR Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : SF/LLC Evictions : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -3033,14 +3709,17 @@
},
{
"BriefDescription": "TOR Inserts : Just Hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Hits : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; All from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA",
"PerPkg": "1",
@@ -3050,6 +3729,7 @@
},
{
"BriefDescription": "TOR Inserts;CLFlush from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
"PerPkg": "1",
@@ -3059,8 +3739,10 @@
},
{
"BriefDescription": "TOR Inserts;CLFlushOpt from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; CLFlushOpt events that are initiated from the Core",
"UMask": "0xc8d7ff01",
@@ -3068,6 +3750,7 @@
},
{
"BriefDescription": "TOR Inserts; CRd from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
"PerPkg": "1",
@@ -3077,8 +3760,10 @@
},
{
"BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88fff01",
@@ -3086,6 +3771,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
"PerPkg": "1",
@@ -3095,8 +3781,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837ff01",
@@ -3104,8 +3792,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt from local IA that misses in the snoop filter",
"UMask": "0xc827ff01",
@@ -3113,8 +3803,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt Pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8a7ff01",
@@ -3122,6 +3814,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd Pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
"PerPkg": "1",
@@ -3131,6 +3824,7 @@
},
{
"BriefDescription": "TOR Inserts; Hits from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
"PerPkg": "1",
@@ -3140,6 +3834,7 @@
},
{
"BriefDescription": "TOR Inserts; CRd hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
"PerPkg": "1",
@@ -3149,6 +3844,7 @@
},
{
"BriefDescription": "TOR Inserts; CRd Pref hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -3158,16 +3854,20 @@
},
{
"BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c0018101",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c0008101",
@@ -3175,6 +3875,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
"PerPkg": "1",
@@ -3184,8 +3885,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to page walks that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fd01",
@@ -3193,8 +3896,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt from local IA that hits in the snoop filter",
"UMask": "0xc827fd01",
@@ -3202,8 +3907,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt Pref hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that hits in the snoop filter",
"UMask": "0xc8a7fd01",
@@ -3211,6 +3918,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd Pref hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
"PerPkg": "1",
@@ -3220,8 +3928,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fd01",
@@ -3229,8 +3939,10 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefCode hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA that hits in the snoop filter",
"UMask": "0xcccffd01",
@@ -3238,8 +3950,10 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefData hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Last level cache prefetch data read from local IA that hits in the snoop filter",
"UMask": "0xccd7fd01",
@@ -3247,6 +3961,7 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefRFO hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -3256,6 +3971,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
"PerPkg": "1",
@@ -3265,6 +3981,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO Pref hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -3274,8 +3991,10 @@
},
{
"BriefDescription": "TOR Inserts;ItoM from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; ItoM events that are initiated from the Core",
"UMask": "0xcc47ff01",
@@ -3283,8 +4002,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd47ff01",
@@ -3292,8 +4013,10 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefCode from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA.",
"UMask": "0xcccfff01",
@@ -3301,6 +4024,7 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefData from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -3310,6 +4034,7 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefRFO from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -3319,6 +4044,7 @@
},
{
"BriefDescription": "TOR Inserts; misses from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
"PerPkg": "1",
@@ -3328,6 +4054,7 @@
},
{
"BriefDescription": "TOR Inserts for CRd misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
"PerPkg": "1",
@@ -3337,16 +4064,20 @@
},
{
"BriefDescription": "CRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRDMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c80b8201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80efe01",
@@ -3354,6 +4085,7 @@
},
{
"BriefDescription": "TOR Inserts; CRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
"PerPkg": "1",
@@ -3363,8 +4095,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88efe01",
@@ -3372,8 +4106,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88f7e01",
@@ -3381,8 +4117,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80f7e01",
@@ -3390,16 +4128,20 @@
},
{
"BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c0018201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c0008201",
@@ -3407,6 +4149,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
"PerPkg": "1",
@@ -3416,16 +4159,20 @@
},
{
"BriefDescription": "DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8138201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fe01",
@@ -3433,16 +4180,20 @@
},
{
"BriefDescription": "DRds issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8178201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8168201",
@@ -3450,8 +4201,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_EXP_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20c8168201",
@@ -3459,6 +4212,7 @@
},
{
"BriefDescription": "TOR Inserts for DRds issued by IA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
"PerPkg": "1",
@@ -3468,6 +4222,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd misses from local IA targeting local memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
"PerPkg": "1",
@@ -3477,6 +4232,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
"PerPkg": "1",
@@ -3486,6 +4242,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
"PerPkg": "1",
@@ -3495,8 +4252,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt from local IA that misses in the snoop filter",
"UMask": "0xc827fe01",
@@ -3504,8 +4263,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8268201",
@@ -3513,8 +4274,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt Pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8a7fe01",
@@ -3522,8 +4285,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8a68201",
@@ -3531,6 +4296,7 @@
},
{
"BriefDescription": "TOR Inserts for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
"PerPkg": "1",
@@ -3540,6 +4306,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
"PerPkg": "1",
@@ -3549,16 +4316,20 @@
},
{
"BriefDescription": "L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8978201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8968201",
@@ -3566,8 +4337,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_EXP_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20c8968201",
@@ -3575,8 +4348,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978601",
@@ -3584,6 +4359,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting local memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
"PerPkg": "1",
@@ -3593,8 +4369,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968601",
@@ -3602,8 +4380,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968a01",
@@ -3611,8 +4391,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978a01",
@@ -3620,6 +4402,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting remote memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
"PerPkg": "1",
@@ -3629,8 +4412,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970601",
@@ -3638,8 +4423,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970a01",
@@ -3647,6 +4434,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd misses from local IA targeting remote memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
"PerPkg": "1",
@@ -3656,6 +4444,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
"PerPkg": "1",
@@ -3665,6 +4454,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
"PerPkg": "1",
@@ -3674,8 +4464,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fe01",
@@ -3683,8 +4475,10 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefCode misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA that misses in the snoop filter",
"UMask": "0xcccffe01",
@@ -3692,14 +4486,17 @@
},
{
"BriefDescription": "LLC Prefetch Code transactions issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10cccf8201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; LLCPrefData misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -3709,16 +4506,20 @@
},
{
"BriefDescription": "LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10ccd78201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10ccd68201",
@@ -3726,8 +4527,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_EXP_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20ccd68201",
@@ -3735,6 +4538,7 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefRFO misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -3744,16 +4548,20 @@
},
{
"BriefDescription": "L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8878201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8868201",
@@ -3761,8 +4569,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_EXP_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20c8868201",
@@ -3770,8 +4580,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668601",
@@ -3779,8 +4591,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668a01",
@@ -3788,8 +4602,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8601",
@@ -3797,8 +4613,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8a01",
@@ -3806,8 +4624,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670601",
@@ -3815,8 +4635,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670a01",
@@ -3824,8 +4646,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0601",
@@ -3833,8 +4657,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0a01",
@@ -3842,6 +4668,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
"PerPkg": "1",
@@ -3851,24 +4678,30 @@
},
{
"BriefDescription": "RFO and L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFOMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8038201",
"Unit": "CHA"
},
{
"BriefDescription": "RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8078201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8068201",
@@ -3876,8 +4709,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_EXP_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20c8068201",
@@ -3885,6 +4720,7 @@
},
{
"BriefDescription": "TOR Inserts RFO misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -3894,6 +4730,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -3903,16 +4740,20 @@
},
{
"BriefDescription": "LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10ccc78201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10ccc68201",
@@ -3920,8 +4761,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_EXP_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20ccc68201",
@@ -3929,6 +4772,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -3938,6 +4782,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
"PerPkg": "1",
@@ -3947,6 +4792,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
"PerPkg": "1",
@@ -3956,8 +4802,10 @@
},
{
"BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc877de01",
@@ -3965,8 +4813,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86ffe01",
@@ -3974,8 +4824,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867fe01",
@@ -3983,8 +4835,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678601",
@@ -3992,8 +4846,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678a01",
@@ -4001,8 +4857,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8601",
@@ -4010,8 +4868,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8a01",
@@ -4019,8 +4879,10 @@
},
{
"BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc87fde01",
@@ -4028,6 +4890,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
"PerPkg": "1",
@@ -4037,6 +4900,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
"PerPkg": "1",
@@ -4046,6 +4910,7 @@
},
{
"BriefDescription": "TOR Inserts;SpecItoM from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
"PerPkg": "1",
@@ -4055,8 +4920,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc3fff01",
@@ -4064,8 +4931,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc37ff01",
@@ -4073,8 +4942,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc2fff01",
@@ -4082,8 +4953,10 @@
},
{
"BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbMtoIs issued by iA Cores . (Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc27ff01",
@@ -4091,8 +4964,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc67ff01",
@@ -4100,8 +4975,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86fff01",
@@ -4109,8 +4986,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867ff01",
@@ -4118,6 +4997,7 @@
},
{
"BriefDescription": "TOR Inserts; All from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO",
"PerPkg": "1",
@@ -4127,6 +5007,7 @@
},
{
"BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
"PerPkg": "1",
@@ -4136,6 +5017,7 @@
},
{
"BriefDescription": "TOR Inserts; Hits from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
"PerPkg": "1",
@@ -4145,6 +5027,7 @@
},
{
"BriefDescription": "TOR Inserts; ItoM hits from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
"PerPkg": "1",
@@ -4154,6 +5037,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4163,6 +5047,7 @@
},
{
"BriefDescription": "TOR Inserts; RdCur and FsRdCur hits from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -4172,6 +5057,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO hits from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
"PerPkg": "1",
@@ -4181,6 +5067,7 @@
},
{
"BriefDescription": "TOR Inserts for ItoM from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
"PerPkg": "1",
@@ -4190,6 +5077,7 @@
},
{
"BriefDescription": "TOR Inserts for ItoMCacheNears from IO devices.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4199,6 +5087,7 @@
},
{
"BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL",
"PerPkg": "1",
@@ -4208,6 +5097,7 @@
},
{
"BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE",
"PerPkg": "1",
@@ -4217,6 +5107,7 @@
},
{
"BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL",
"PerPkg": "1",
@@ -4226,6 +5117,7 @@
},
{
"BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
"PerPkg": "1",
@@ -4235,6 +5127,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
"PerPkg": "1",
@@ -4244,6 +5137,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoM, indicating a full cacheline write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
"PerPkg": "1",
@@ -4253,6 +5147,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4262,6 +5157,7 @@
},
{
"BriefDescription": "TOR Inserts; RdCur and FsRdCur requests from local IO that miss LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -4271,6 +5167,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO misses from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
"PerPkg": "1",
@@ -4280,6 +5177,7 @@
},
{
"BriefDescription": "TOR Inserts for RdCur from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
"PerPkg": "1",
@@ -4289,6 +5187,7 @@
},
{
"BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL",
"PerPkg": "1",
@@ -4298,6 +5197,7 @@
},
{
"BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE",
"PerPkg": "1",
@@ -4307,6 +5207,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
"PerPkg": "1",
@@ -4316,6 +5217,7 @@
},
{
"BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
"PerPkg": "1",
@@ -4325,8 +5227,10 @@
},
{
"BriefDescription": "TOR Inserts : IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IPQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x8",
@@ -4334,8 +5238,10 @@
},
{
"BriefDescription": "TOR Inserts : IRQ - iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IRQ - iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : From an iA Core",
"UMask": "0x1",
@@ -4343,8 +5249,10 @@
},
{
"BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IRQ - Non iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x10",
@@ -4352,24 +5260,30 @@
},
{
"BriefDescription": "TOR Inserts : Just ISOC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just ISOC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just Local Targets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Local Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA and IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally initiated requests",
"UMask": "0xc000ff05",
@@ -4377,8 +5291,10 @@
},
{
"BriefDescription": "TOR Inserts : All from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally initiated requests from iA Cores",
"UMask": "0xc000ff01",
@@ -4386,8 +5302,10 @@
},
{
"BriefDescription": "TOR Inserts : All from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally generated IO traffic",
"UMask": "0xc000ff04",
@@ -4395,80 +5313,100 @@
},
{
"BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Misses : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : MMCFG Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : MMCFG Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : MMIO Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MMIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : MMIO Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NearMem",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NonCoherent",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NonCoherent : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NotNearMem",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NotNearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : PMM Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PM Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PRQ - IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : From a PCIe Device",
"UMask": "0x4",
@@ -4476,8 +5414,10 @@
},
{
"BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PRQ - Non IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x20",
@@ -4485,16 +5425,20 @@
},
{
"BriefDescription": "TOR Inserts : Just Remote Targets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REMOTE_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Remote Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Remote : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All remote requests (e.g. snoops, writebacks) that came from remote sockets",
"UMask": "0xc001ffc8",
@@ -4502,8 +5446,10 @@
},
{
"BriefDescription": "TOR Inserts : All Snoops from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All Snoops from Remote : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All snoops to this LLC that came from remote sockets",
"UMask": "0xc001ff08",
@@ -4511,8 +5457,10 @@
},
{
"BriefDescription": "TOR Inserts : RRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RRQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x40",
@@ -4520,48 +5468,60 @@
},
{
"BriefDescription": "TOR Inserts for INVXTOM opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_INVXTOM_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e87e8240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts for RDCODE opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_RDCODE_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e80e8240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts for RDCUR opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_RDCUR_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e8068240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts for RDDATA opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_RDDATA_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e8168240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts for RDINVOWN_OPT opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS_RDINVOWN_OPT_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e8268240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; All Snoops from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.SNPS_FROM_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. All snoops to this LLC that came from remote sockets.",
"UMask": "0xc001ff08",
@@ -4569,8 +5529,10 @@
},
{
"BriefDescription": "TOR Inserts : WBQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WBQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x80",
@@ -4578,8 +5540,10 @@
},
{
"BriefDescription": "TOR Occupancy : All",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0xc001ffff",
@@ -4587,16 +5551,20 @@
},
{
"BriefDescription": "TOR Occupancy : DDR Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DDR Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : SF/LLC Evictions : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -4604,14 +5572,17 @@
},
{
"BriefDescription": "TOR Occupancy : Just Hits",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Hits : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; All from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
"PerPkg": "1",
@@ -4621,6 +5592,7 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
"PerPkg": "1",
@@ -4630,8 +5602,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8d7ff01",
@@ -4639,6 +5613,7 @@
},
{
"BriefDescription": "TOR Occupancy; CRd from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
"PerPkg": "1",
@@ -4648,8 +5623,10 @@
},
{
"BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88fff01",
@@ -4657,6 +5634,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
"PerPkg": "1",
@@ -4666,8 +5644,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -4677,8 +5657,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt from local IA that misses in the snoop filter",
"UMask": "0xc827ff01",
@@ -4686,8 +5668,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt Pref from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8a7ff01",
@@ -4695,6 +5679,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
"PerPkg": "1",
@@ -4704,6 +5689,7 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
"PerPkg": "1",
@@ -4713,6 +5699,7 @@
},
{
"BriefDescription": "TOR Occupancy; CRd hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
"PerPkg": "1",
@@ -4722,6 +5709,7 @@
},
{
"BriefDescription": "TOR Occupancy; CRd Pref hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -4731,16 +5719,20 @@
},
{
"BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c0018101",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c0008101",
@@ -4748,6 +5740,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
"PerPkg": "1",
@@ -4757,8 +5750,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -4768,8 +5763,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt from local IA that hits in the snoop filter",
"UMask": "0xc827fd01",
@@ -4777,8 +5774,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt Pref hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that hits in the snoop filter",
"UMask": "0xc8a7fd01",
@@ -4786,6 +5785,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
"PerPkg": "1",
@@ -4795,8 +5795,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fd01",
@@ -4804,8 +5806,10 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefCode hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Last level cache prefetch code read from local IA that hits in the snoop filter",
"UMask": "0xcccffd01",
@@ -4813,8 +5817,10 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefData hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA that hits in the snoop filter",
"UMask": "0xccd7fd01",
@@ -4822,6 +5828,7 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefRFO hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -4831,6 +5838,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
"PerPkg": "1",
@@ -4840,6 +5848,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO Pref hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -4849,8 +5858,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47ff01",
@@ -4858,8 +5869,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd47ff01",
@@ -4867,8 +5880,10 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefCode from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA.",
"UMask": "0xcccfff01",
@@ -4876,6 +5891,7 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefData from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -4885,6 +5901,7 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefRFO from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -4894,6 +5911,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from Local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
"PerPkg": "1",
@@ -4903,6 +5921,7 @@
},
{
"BriefDescription": "TOR Occupancy; CRd misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
"PerPkg": "1",
@@ -4912,16 +5931,20 @@
},
{
"BriefDescription": "TOR Occupancy for CRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRDMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c80b8201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80efe01",
@@ -4929,8 +5952,10 @@
},
{
"BriefDescription": "TOR Occupancy; CRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88ffe01",
@@ -4938,8 +5963,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88efe01",
@@ -4947,8 +5974,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88f7e01",
@@ -4956,8 +5985,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80f7e01",
@@ -4965,16 +5996,20 @@
},
{
"BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c0018201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c0008201",
@@ -4982,6 +6017,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRd misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
"PerPkg": "1",
@@ -4991,16 +6027,20 @@
},
{
"BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8138201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -5010,16 +6050,20 @@
},
{
"BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8178201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8168201",
@@ -5027,8 +6071,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_EXP_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20c8168201",
@@ -5036,6 +6082,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
"PerPkg": "1",
@@ -5045,6 +6092,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRd misses from local IA targeting local memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
"PerPkg": "1",
@@ -5054,6 +6102,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
"PerPkg": "1",
@@ -5063,6 +6112,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
"PerPkg": "1",
@@ -5072,8 +6122,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt from local IA that misses in the snoop filter",
"UMask": "0xc827fe01",
@@ -5081,8 +6133,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8268201",
@@ -5090,8 +6144,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8a7fe01",
@@ -5099,8 +6155,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8a68201",
@@ -5108,6 +6166,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
"PerPkg": "1",
@@ -5117,6 +6176,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
"PerPkg": "1",
@@ -5126,16 +6186,20 @@
},
{
"BriefDescription": "TOR Occupancy for L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8978201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8968201",
@@ -5143,8 +6207,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_EXP_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20c8968201",
@@ -5152,8 +6218,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978601",
@@ -5161,8 +6229,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc896fe01",
@@ -5170,8 +6240,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968601",
@@ -5179,8 +6251,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968a01",
@@ -5188,8 +6262,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978a01",
@@ -5197,6 +6273,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
"PerPkg": "1",
@@ -5206,8 +6283,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970601",
@@ -5215,8 +6294,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970a01",
@@ -5224,6 +6305,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRd misses from local IA targeting remote memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
"PerPkg": "1",
@@ -5233,6 +6315,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
"PerPkg": "1",
@@ -5242,6 +6325,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
"PerPkg": "1",
@@ -5251,8 +6335,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fe01",
@@ -5260,8 +6346,10 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefCode misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Last level cache prefetch code read from local IA that misses in the snoop filter",
"UMask": "0xcccffe01",
@@ -5269,14 +6357,17 @@
},
{
"BriefDescription": "TOR Occupancy for LLC Prefetch Code transactions issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10cccf8201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; LLCPrefData misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -5286,16 +6377,20 @@
},
{
"BriefDescription": "TOR Occupancy for LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10ccd78201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10ccd68201",
@@ -5303,8 +6398,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_EXP_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20ccd68201",
@@ -5312,6 +6409,7 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefRFO misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -5321,16 +6419,20 @@
},
{
"BriefDescription": "TOR Occupancy for L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8878201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8868201",
@@ -5338,8 +6440,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_EXP_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20c8868201",
@@ -5347,8 +6451,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668601",
@@ -5356,8 +6462,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668a01",
@@ -5365,8 +6473,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8601",
@@ -5374,8 +6484,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8a01",
@@ -5383,8 +6495,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670601",
@@ -5392,8 +6506,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670a01",
@@ -5401,8 +6517,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0601",
@@ -5410,8 +6528,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0a01",
@@ -5419,6 +6539,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
"PerPkg": "1",
@@ -5428,24 +6549,30 @@
},
{
"BriefDescription": "TOR Occupancy for RFO and L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFOMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8038201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8078201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8068201",
@@ -5453,8 +6580,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_EXP_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20c8068201",
@@ -5462,6 +6591,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -5471,6 +6601,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -5480,16 +6611,20 @@
},
{
"BriefDescription": "TOR Occupancy for LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10ccc78201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10ccc68201",
@@ -5497,8 +6632,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_EXP_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20ccc68201",
@@ -5506,6 +6643,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -5515,6 +6653,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
"PerPkg": "1",
@@ -5524,6 +6663,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
"PerPkg": "1",
@@ -5533,8 +6673,10 @@
},
{
"BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc877de01",
@@ -5542,8 +6684,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86ffe01",
@@ -5551,8 +6695,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867fe01",
@@ -5560,8 +6706,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678601",
@@ -5569,8 +6717,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678a01",
@@ -5578,8 +6728,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8601",
@@ -5587,8 +6739,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8a01",
@@ -5596,8 +6750,10 @@
},
{
"BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc87fde01",
@@ -5605,6 +6761,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
"PerPkg": "1",
@@ -5614,6 +6771,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO prefetch from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
"PerPkg": "1",
@@ -5623,6 +6781,7 @@
},
{
"BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
"PerPkg": "1",
@@ -5632,8 +6791,10 @@
},
{
"BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc27ff01",
@@ -5641,8 +6802,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86fff01",
@@ -5650,8 +6813,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867ff01",
@@ -5659,6 +6824,7 @@
},
{
"BriefDescription": "TOR Occupancy; All from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
"PerPkg": "1",
@@ -5668,8 +6834,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8c3ff04",
@@ -5677,6 +6845,7 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
"PerPkg": "1",
@@ -5686,6 +6855,7 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM hits from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
"PerPkg": "1",
@@ -5695,6 +6865,7 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -5704,6 +6875,7 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur hits from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -5713,8 +6885,10 @@
},
{
"BriefDescription": "TOR Occupancy; RFO hits from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fd04",
@@ -5722,6 +6896,7 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
"PerPkg": "1",
@@ -5731,8 +6906,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -5742,6 +6919,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
"PerPkg": "1",
@@ -5751,6 +6929,7 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM misses from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
"PerPkg": "1",
@@ -5760,6 +6939,7 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -5769,8 +6949,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC and targets local memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd42fe04",
@@ -5778,8 +6960,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC and targets remote memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd437e04",
@@ -5787,8 +6971,10 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM misses from local IO and targets local memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc42fe04",
@@ -5796,8 +6982,10 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM misses from local IO and targets remote memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc437e04",
@@ -5805,6 +6993,7 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -5814,8 +7003,10 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO and targets local memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8f2fe04",
@@ -5823,8 +7014,10 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO and targets remote memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8f37e04",
@@ -5832,8 +7025,10 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fe04",
@@ -5841,6 +7036,7 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
"PerPkg": "1",
@@ -5850,8 +7046,10 @@
},
{
"BriefDescription": "TOR Occupancy; ItoM from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803ff04",
@@ -5859,8 +7057,10 @@
},
{
"BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc23ff04",
@@ -5868,8 +7068,10 @@
},
{
"BriefDescription": "TOR Occupancy : IPQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IPQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x8",
@@ -5877,8 +7079,10 @@
},
{
"BriefDescription": "TOR Occupancy : IRQ - iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IRQ - iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : From an iA Core",
"UMask": "0x1",
@@ -5886,8 +7090,10 @@
},
{
"BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IRQ - Non iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x10",
@@ -5895,24 +7101,30 @@
},
{
"BriefDescription": "TOR Occupancy : Just ISOC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just ISOC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just Local Targets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Local Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA and IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally initiated requests",
"UMask": "0xc000ff05",
@@ -5920,8 +7132,10 @@
},
{
"BriefDescription": "TOR Occupancy : All from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally initiated requests from iA Cores",
"UMask": "0xc000ff01",
@@ -5929,8 +7143,10 @@
},
{
"BriefDescription": "TOR Occupancy : All from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally generated IO traffic",
"UMask": "0xc000ff04",
@@ -5938,80 +7154,100 @@
},
{
"BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just Misses",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Misses : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : MMCFG Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : MMCFG Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : MMIO Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MMIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : MMIO Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NearMem",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NonCoherent : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NotNearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : PMM Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PMM Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PRQ - IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : From a PCIe Device",
"UMask": "0x4",
@@ -6019,8 +7255,10 @@
},
{
"BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PRQ - Non IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x20",
@@ -6028,16 +7266,20 @@
},
{
"BriefDescription": "TOR Occupancy : Just Remote Targets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REMOTE_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Remote Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All from Remote",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Remote : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All remote requests (e.g. snoops, writebacks) that came from remote sockets",
"UMask": "0xc001ffc8",
@@ -6045,8 +7287,10 @@
},
{
"BriefDescription": "TOR Occupancy : All Snoops from Remote",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All Snoops from Remote : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All snoops to this LLC that came from remote sockets",
"UMask": "0xc001ff08",
@@ -6054,8 +7298,10 @@
},
{
"BriefDescription": "TOR Occupancy : RRQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RRQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x40",
@@ -6063,48 +7309,60 @@
},
{
"BriefDescription": "TOR Occupancy for INVXTOM opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_INVXTOM_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e87e8240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for RDCODE opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_RDCODE_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e80e8240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for RDCUR opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_RDCUR_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e8068240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for RDDATA opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_RDDATA_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e8168240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for RDINVOWN_OPT opcodes received from a remote socket which miss the L3 and target memory in a CXL type 3 memory expander local to this socket.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ_MISS_RDINVOWN_OPT_CXL_EXP_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20e8268240",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; All Snoops from Remote",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.SNPS_FROM_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. All snoops to this LLC that came from remote sockets.",
"UMask": "0xc001ff08",
@@ -6112,8 +7370,10 @@
},
{
"BriefDescription": "TOR Occupancy : WBQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WBQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x80",
@@ -6121,8 +7381,10 @@
},
{
"BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbPushMtoI : Pushed to LLC : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was able to push WbPushMToI to LLC",
"UMask": "0x1",
@@ -6130,8 +7392,10 @@
},
{
"BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbPushMtoI : Pushed to Memory : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
"UMask": "0x2",
@@ -6139,8 +7403,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC0 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -6148,8 +7414,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC1 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -6157,8 +7425,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC2 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -6166,8 +7436,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC3 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -6175,8 +7447,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC4 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -6184,8 +7458,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC5 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -6193,8 +7469,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 0?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
"UMask": "0x8",
@@ -6202,8 +7480,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 0?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
"UMask": "0x4",
@@ -6211,8 +7491,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 1?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
"UMask": "0x80",
@@ -6220,8 +7502,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 1?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
"UMask": "0x40",
@@ -6229,8 +7513,10 @@
},
{
"BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Sent (on 0?) : Number of XPT prefetches sent",
"UMask": "0x1",
@@ -6238,8 +7524,10 @@
},
{
"BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Sent (on 1?) : Number of XPT prefetches sent",
"UMask": "0x10",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json
index f3e84fd88de3..ff81f3a6426a 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cxl.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of lfclk ticks",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x01",
"EventName": "UNC_CXLCM_CLOCKTICKS",
"PerPkg": "1",
@@ -9,390 +10,487 @@
},
{
"BriefDescription": "Number of Allocation to Mem Rxx AGF 0",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req AGF0",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_REQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp AGF",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_REQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Data AGF",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_RSP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp AGF",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_RSP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req AGF 1",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Data AGF",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with AK set",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.AK_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with BE set",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.BE_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of control flits received",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.CTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Headerless flits received",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.NO_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of protocol flits received",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.PROT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with SZ set",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.SZ_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of flits received",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.VALID",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of valid messages in the flit",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.VALID_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of CRC errors detected",
+ "Counter": "4,5,6,7",
"EventCode": "0x40",
"EventName": "UNC_CXLCM_RxC_MISC.CRC_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Init flits sent",
+ "Counter": "4,5,6,7",
"EventCode": "0x40",
"EventName": "UNC_CXLCM_RxC_MISC.INIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of LLCRD flits sent",
+ "Counter": "4,5,6,7",
"EventCode": "0x40",
"EventName": "UNC_CXLCM_RxC_MISC.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Retry flits sent",
+ "Counter": "4,5,6,7",
"EventCode": "0x40",
"EventName": "UNC_CXLCM_RxC_MISC.RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Data Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Rxx Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Cache Data Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Cache Req Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Cache Rsp Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Mem Data Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Mem Rxx Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with AK set",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.AK_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with BE set",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.BE_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of control flits packed",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.CTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Headerless flits packed",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.NO_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of protocol flits packed",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.PROT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with SZ set",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.SZ_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of flits packed",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.VALID",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Data Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_REQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp1 Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_REQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp0 Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_RSP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_RSP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Rxx Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Counts the number of uclk ticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_CXLDP_CLOCKTICKS",
"PerPkg": "1",
@@ -401,48 +499,60 @@
},
{
"BriefDescription": "Number of Allocation to M2S Data AGF",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to M2S Req AGF",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to U2C Data AGF",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to U2C Req AGF",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to U2C Rsp AGF 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_RSP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to U2C Rsp AGF 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_RSP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLDP"
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
index 22bb490e9666..8b1ae9540066 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
+ "Counter": "0,1",
"EventCode": "0x0f",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total IRP occupancy of inbound read and write requests to coherent memory. This is effectively the sum of read occupancy and write occupancy.",
"UMask": "0x4",
@@ -10,6 +12,7 @@
},
{
"BriefDescription": "IRP Clockticks",
+ "Counter": "0,1",
"EventCode": "0x01",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
@@ -18,6 +21,7 @@
},
{
"BriefDescription": "FAF RF full",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_FAF_FULL",
"PerPkg": "1",
@@ -25,6 +29,7 @@
},
{
"BriefDescription": "FAF - request insert from TC.",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_FAF_INSERTS",
"PerPkg": "1",
@@ -32,6 +37,7 @@
},
{
"BriefDescription": "FAF occupancy",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_FAF_OCCUPANCY",
"PerPkg": "1",
@@ -39,6 +45,7 @@
},
{
"BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_FAF_TRANSACTIONS",
"PerPkg": "1",
@@ -46,14 +53,17 @@
},
{
"BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.EVICTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
"PerPkg": "1",
@@ -62,78 +72,97 @@
},
{
"BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.FAST_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.FAST_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.FAST_XFER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.LOST_FWD",
"PerPkg": "1",
@@ -143,8 +172,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x20",
@@ -152,8 +183,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Received Valid : Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x40",
@@ -161,8 +194,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of E Line : Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x4",
@@ -170,8 +205,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of I Line : Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x1",
@@ -179,8 +216,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of M Line : Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x8",
@@ -188,8 +227,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of S Line : Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x2",
@@ -197,8 +238,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
"UMask": "0x7e",
@@ -206,8 +249,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
"UMask": "0x74",
@@ -215,8 +260,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
"UMask": "0x72",
@@ -224,6 +271,7 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
"PerPkg": "1",
@@ -233,8 +281,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
"UMask": "0x71",
@@ -242,62 +292,77 @@
},
{
"BriefDescription": "Snoop Responses : Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Hit I",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Hit M",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Miss",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpCode",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpData",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpInv",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -307,132 +372,167 @@
},
{
"BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
"EventCode": "0x0b",
"EventName": "UNC_I_TxC_AK_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x08",
"EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x06",
"EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x09",
"EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x07",
"EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x0a",
"EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Counter": "0,1",
"EventCode": "0x1c",
"EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0AD1 both. Stalls on both AD0 and AD1 will count as 2",
"Unit": "IRP"
},
{
"BriefDescription": "No AD0 Egress Credits Stalls",
+ "Counter": "0,1",
"EventCode": "0x1a",
"EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No AD0 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No AD1 Egress Credits Stalls",
+ "Counter": "0,1",
"EventCode": "0x1b",
"EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No AD1 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x1d",
"EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No BL Egress Credit Stalls : Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0x0d",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0x0e",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0x0c",
"EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Request Queue Occupancy : Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
"Unit": "IRP"
},
{
"BriefDescription": "M2M Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2M_CLOCKTICKS",
"PerPkg": "1",
@@ -441,6 +541,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2M_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -448,16 +549,20 @@
},
{
"BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress : Counts the number of time non cisgress D2C was not honoured by egress due to directory state constraints",
"UMask": "0x2",
@@ -465,39 +570,49 @@
},
{
"BriefDescription": "Counts the time when FM didn't do d2c for fill reads (cross tile case)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to core transaction was overridden : Cisgress",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to core transaction was overridden : 2LM Hit?",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE.PMM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_DIRECT2UPITXN_OVERRIDE.PMM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a direct to UPI transaction was overridden. : Counts the number of times D2K wasn't honored even though the incoming request had d2k set",
"UMask": "0x1",
@@ -505,24 +620,30 @@
},
{
"BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored : Counts cisgress d2K that was not honored due to directory constraints",
"UMask": "0x4",
@@ -530,8 +651,10 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U : Counts the number of time D2K was not honoured by egress due to directory state constraints",
"UMask": "0x1",
@@ -539,8 +662,10 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored : Counts non cisgress d2K that was not honored due to directory constraints",
"UMask": "0x2",
@@ -548,8 +673,10 @@
},
{
"BriefDescription": "Messages sent direct to the Intel UPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times egress did D2K (Direct to KTI)",
"UMask": "0x7",
@@ -557,86 +684,107 @@
},
{
"BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -646,6 +794,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -655,6 +804,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -664,6 +814,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -673,86 +824,107 @@
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x320",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x340",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -761,8 +933,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_I_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to I to non persistent memory (DRAM or HBM)",
"UMask": "0x120",
@@ -770,8 +944,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_I_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to I to non persistent memory (DRAM or HBM)",
"UMask": "0x220",
@@ -779,8 +955,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_S_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to S to non persistent memory (DRAM or HBM)",
"UMask": "0x140",
@@ -788,8 +966,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_S_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to S to non persistent memory (DRAM or HBM)",
"UMask": "0x240",
@@ -797,8 +977,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory (DRAM or HBM)",
"UMask": "0x101",
@@ -806,24 +988,30 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x304",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x302",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_A_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to A to non persistent memory (DRAM or HBM)",
"UMask": "0x104",
@@ -831,8 +1019,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_A_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to A to non persistent memory (DRAM or HBM)",
"UMask": "0x204",
@@ -840,8 +1030,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_S_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to S to non persistent memory (DRAM or HBM)",
"UMask": "0x102",
@@ -849,8 +1041,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_S_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to S to non persistent memory (DRAM or HBM)",
"UMask": "0x202",
@@ -858,8 +1052,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts any 2lm miss data return that would result in directory update to non persistent memory (DRAM or HBM)",
"UMask": "0x201",
@@ -867,24 +1063,30 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_A_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to A to non persistent memory (DRAM or HBM)",
"UMask": "0x110",
@@ -892,8 +1094,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_A_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to A to non persistent memory (DRAM or HBM)",
"UMask": "0x210",
@@ -901,8 +1105,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_I_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to I to non persistent memory (DRAM or HBM)",
"UMask": "0x108",
@@ -910,8 +1116,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_I_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to I to non persistent memory (DRAM or HBM)",
"UMask": "0x208",
@@ -919,8 +1127,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x80000004",
@@ -928,8 +1138,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x80000001",
@@ -937,40 +1149,50 @@
},
{
"BriefDescription": "Count when Starve Glocab counter is at 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_IGR_STARVE_WINNER.MASK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x304",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0.TO_NM1LM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0.TO_NM1LM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0.TO_NMCache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0.TO_NMCache",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x110",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -979,24 +1201,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x140",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x102",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1005,24 +1233,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x110",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1031,24 +1265,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1.TO_NM1LM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1.TO_NM1LM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x208",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1.TO_NMCache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1.TO_NMCache",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x210",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1057,24 +1297,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x240",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x202",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1083,24 +1329,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x210",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x208",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1109,62 +1361,77 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.FROM_TGR",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x340",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x302",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x301",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_NM1LM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_NM1LM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_NMCACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_NMCACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_PMM",
"PerPkg": "1",
@@ -1173,23 +1440,29 @@
},
{
"BriefDescription": "All Writes - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1810",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0.NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1198,15 +1471,19 @@
},
{
"BriefDescription": "From TGR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1215,30 +1492,38 @@
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x804",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive Miss - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1247,32 +1532,40 @@
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x808",
"Unit": "M2M"
},
{
"BriefDescription": "DDR, acting as Cache - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x840",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x820",
"Unit": "M2M"
},
{
"BriefDescription": "PMM - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1282,15 +1575,19 @@
},
{
"BriefDescription": "Non-Inclusive - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1.NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "All Writes - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1299,15 +1596,19 @@
},
{
"BriefDescription": "From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1316,30 +1617,38 @@
},
{
"BriefDescription": "ISOCH Full Line - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1004",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive Miss - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1348,32 +1657,40 @@
},
{
"BriefDescription": "ISOCH Partial - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1008",
"Unit": "M2M"
},
{
"BriefDescription": "DDR, acting as Cache - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1040",
"Unit": "M2M"
},
{
"BriefDescription": "DDR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1020",
"Unit": "M2M"
},
{
"BriefDescription": "PMM - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1383,75 +1700,94 @@
},
{
"BriefDescription": "From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Full Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1801",
"Unit": "M2M"
},
{
"BriefDescription": "ISOCH Full Line - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1804",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1802",
"Unit": "M2M"
},
{
"BriefDescription": "ISOCH Partial - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1808",
"Unit": "M2M"
},
{
"BriefDescription": "DDR, acting as Cache - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1840",
"Unit": "M2M"
},
{
"BriefDescription": "DDR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1820",
"Unit": "M2M"
},
{
"BriefDescription": "PMM - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
"PerPkg": "1",
@@ -1460,143 +1796,179 @@
},
{
"BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "M2M"
},
{
"BriefDescription": ": UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2M"
},
{
"BriefDescription": ": XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.RD_MERGED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.WR_MERGED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.WR_SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Prefetch CAM Inserts : XPT -All Channels",
"UMask": "0x5",
@@ -1604,108 +1976,135 @@
},
{
"BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Clean NearMem Read Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
"PerPkg": "1",
@@ -1715,6 +2114,7 @@
},
{
"BriefDescription": "Dirty NearMem Read Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
"PerPkg": "1",
@@ -1724,8 +2124,10 @@
},
{
"BriefDescription": "Tag Hit : Clean NearMem Underfill Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts clean underfill hits due to a partial write",
"UMask": "0x4",
@@ -1733,8 +2135,10 @@
},
{
"BriefDescription": "Tag Hit : Dirty NearMem Underfill Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts dirty underfill read hits due to a partial write",
"UMask": "0x8",
@@ -1742,230 +2146,288 @@
},
{
"BriefDescription": "UNC_M2M_TAG_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2M_TAG_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x104",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x204",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "WPQ Flush : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "WPQ Flush : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "CBox AD Credits Empty : Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Requests : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x4",
@@ -1973,8 +2435,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Snoops : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x8",
@@ -1982,8 +2446,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : VNA Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : VNA Messages : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x1",
@@ -1991,8 +2457,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Writebacks : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x2",
@@ -2000,6 +2468,7 @@
},
{
"BriefDescription": "M3UPI Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M3UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -2008,31 +2477,39 @@
},
{
"BriefDescription": "M3UPI CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2C Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_M3UPI_D2C_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "D2C Sent : Count cases BL sends direct to core",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2U Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_M3UPI_D2U_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "D2U Sent : Cases where SMI3 sends D2U command",
"Unit": "M3UPI"
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -2040,8 +2517,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -2049,8 +2528,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only) : No vn0 and vna credits available to send to M2",
"UMask": "0x1",
@@ -2058,8 +2539,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO2",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO2 : No vn0 and vna credits available to send to M2",
"UMask": "0x2",
@@ -2067,8 +2550,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO3",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO3 : No vn0 and vna credits available to send to M2",
"UMask": "0x4",
@@ -2076,8 +2561,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO4",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO4 : No vn0 and vna credits available to send to M2",
"UMask": "0x8",
@@ -2085,8 +2572,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO5",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
"UMask": "0x10",
@@ -2094,8 +2583,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together : No vn0 and vna credits available to send to M2",
"UMask": "0x40",
@@ -2103,8 +2594,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits : No vn0 and vna credits available to send to M2",
"UMask": "0x80",
@@ -2112,8 +2605,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO5",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.UBOX_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
"UMask": "0x20",
@@ -2121,8 +2616,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x1",
@@ -2130,8 +2627,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 1 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x2",
@@ -2139,8 +2638,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x4",
@@ -2148,8 +2649,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AK - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AK - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x10",
@@ -2157,8 +2660,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AK - Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AK - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x20",
@@ -2166,8 +2671,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : BL - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : BL - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x8",
@@ -2175,8 +2682,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : REQ on AD : VN0 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2184,8 +2693,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : RSP on AD : VN0 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2193,8 +2704,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : SNP on AD : VN0 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2202,8 +2715,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : NCB on BL : VN0 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2211,8 +2726,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : NCS on BL : VN0 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2220,8 +2737,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : RSP on BL : VN0 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2229,8 +2748,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : WB on BL",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : WB on BL : VN0 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2238,8 +2759,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : REQ on AD : VN1 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2247,8 +2770,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : RSP on AD : VN1 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2256,8 +2781,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : SNP on AD : VN1 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2265,8 +2792,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : NCB on BL : VN1 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2274,8 +2803,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : NCS on BL : VN1 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2283,8 +2814,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : RSP on BL : VN1 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2292,8 +2825,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : WB on BL",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : WB on BL : VN1 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2301,8 +2836,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0 : AD and BL messages won arbitration concurrently / in parallel",
"UMask": "0x10",
@@ -2310,8 +2847,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1 : AD and BL messages won arbitration concurrently / in parallel",
"UMask": "0x20",
@@ -2319,8 +2858,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : Max Parallel Win",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ALL_PARALLEL_WIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : Max Parallel Win : VN0 and VN1 arbitration sub-pipelines both produced AD and BL winners (maximum possible parallel winners)",
"UMask": "0x80",
@@ -2328,8 +2869,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN0",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN0 : Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
"UMask": "0x1",
@@ -2337,8 +2880,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN1",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN1 : Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
"UMask": "0x2",
@@ -2346,8 +2891,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN0",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN0 : Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
"UMask": "0x4",
@@ -2355,8 +2902,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN1",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN1 : Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
"UMask": "0x8",
@@ -2364,8 +2913,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.VN01_PARALLEL_WIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win : VN0 and VN1 arbitration sub-pipelines had parallel winners (at least one AD or BL on each side)",
"UMask": "0x40",
@@ -2373,8 +2924,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : REQ on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2382,8 +2935,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : RSP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2391,8 +2946,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : SNP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2400,8 +2957,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : NCB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2409,8 +2968,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : NCS on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2418,8 +2979,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : RSP on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2427,8 +2990,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : WB on BL",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : WB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2436,8 +3001,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : REQ on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2445,8 +3012,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : RSP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2454,8 +3023,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : SNP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2463,8 +3034,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : NCB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2472,8 +3045,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : NCS on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2481,8 +3056,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : RSP on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2490,8 +3067,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : WB on BL",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : WB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2499,8 +3078,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : REQ on AD : VN0 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2508,8 +3089,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : RSP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2517,8 +3100,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : SNP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2526,8 +3111,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : NCB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2535,8 +3122,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : NCS on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2544,8 +3133,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : RSP on BL : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2553,8 +3144,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : WB on BL",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : WB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2562,8 +3155,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : REQ on AD : VN1 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2571,8 +3166,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : RSP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2580,8 +3177,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : SNP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2589,8 +3188,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : NCB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2598,8 +3199,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : NCS on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2607,8 +3210,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : RSP on BL : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2616,8 +3221,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : WB on BL",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : WB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2625,8 +3232,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
"UMask": "0x2",
@@ -2634,8 +3243,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while pipeline is idle",
"UMask": "0x1",
@@ -2643,8 +3254,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 1",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 1 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 1 while merging with bl message in same flit",
"UMask": "0x4",
@@ -2652,8 +3265,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 2 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 2 while merging with bl message in same flit",
"UMask": "0x8",
@@ -2661,8 +3276,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : Any In BGF FIFO",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : Any In BGF FIFO : Indication that at least one packet (flit) is in the bgf (fifo only)",
"UMask": "0x1",
@@ -2670,8 +3287,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : Any in BGF Path",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : Any in BGF Path : Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
"UMask": "0x2",
@@ -2679,8 +3298,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.LT1_FOR_D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 1",
"UMask": "0x10",
@@ -2688,8 +3309,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.LT2_FOR_D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 2",
"UMask": "0x20",
@@ -2697,8 +3320,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : No D2K For Arb",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.VN0_NO_D2K_FOR_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : No D2K For Arb : VN0 BL RSP message was blocked from arbitration request due to lack of D2K CMP credit",
"UMask": "0x4",
@@ -2706,8 +3331,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.VN1_NO_D2K_FOR_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
"UMask": "0x8",
@@ -2715,8 +3342,10 @@
},
{
"BriefDescription": "Credit Occupancy : Credits Consumed",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.CONSUMED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Credits Consumed : number of remote vna credits consumed per cycle",
"UMask": "0x80",
@@ -2724,8 +3353,10 @@
},
{
"BriefDescription": "Credit Occupancy : D2K Credits",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : D2K Credits : D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x10",
@@ -2733,8 +3364,10 @@
},
{
"BriefDescription": "Credit Occupancy : Packets in BGF FIFO",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Packets in BGF FIFO : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
"UMask": "0x2",
@@ -2742,8 +3375,10 @@
},
{
"BriefDescription": "Credit Occupancy : Packets in BGF Path",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Packets in BGF Path : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
"UMask": "0x4",
@@ -2751,8 +3386,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in completion fifo only",
"UMask": "0x40",
@@ -2760,8 +3397,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in marker table and in fifo",
"UMask": "0x20",
@@ -2769,8 +3408,10 @@
},
{
"BriefDescription": "Credit Occupancy : Transmit Credits",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Transmit Credits : Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x8",
@@ -2778,8 +3419,10 @@
},
{
"BriefDescription": "Credit Occupancy : VNA In Use",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : VNA In Use : Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
"UMask": "0x1",
@@ -2787,8 +3430,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2796,8 +3441,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2805,8 +3452,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2814,8 +3463,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2823,8 +3474,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2832,8 +3485,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2841,8 +3496,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2850,8 +3507,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : All",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : All : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but could not be sent for any reason, e.g. low credits, low tsv, stall injection",
"UMask": "0x1",
@@ -2859,8 +3518,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : No BGF Credits",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_BGF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : No BGF Credits : Data flit is ready for transmission but could not be sent",
"UMask": "0x8",
@@ -2868,8 +3529,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : No TxQ Credits",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : No TxQ Credits : Data flit is ready for transmission but could not be sent",
"UMask": "0x10",
@@ -2877,8 +3540,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : TSV High",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.TSV_HI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : TSV High : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while tsv high",
"UMask": "0x2",
@@ -2886,8 +3551,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : Cycle valid for Flit",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.VALID_FOR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : Cycle valid for Flit : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while cycle is valid for flit transmission",
"UMask": "0x4",
@@ -2895,8 +3562,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 0",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 0 : generating bl data flit sequence; waiting for data pump 0",
"UMask": "0x1",
@@ -2904,8 +3573,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
"UMask": "0x10",
@@ -2913,8 +3584,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is tracking at least one message",
"UMask": "0x8",
@@ -2922,8 +3595,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending completion fifo is full",
"UMask": "0x40",
@@ -2931,8 +3606,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
"UMask": "0x20",
@@ -2940,8 +3617,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : a bl message finished but is in limbo and moved to pump-1-pending logic",
"UMask": "0x4",
@@ -2949,8 +3628,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 1",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 1 : generating bl data flit sequence; waiting for data pump 1",
"UMask": "0x2",
@@ -2958,8 +3639,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "Counter": "0",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request naturally serviced during hold-off period",
"UMask": "0x4",
@@ -2967,8 +3650,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "Counter": "0",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request forcibly serviced during service window",
"UMask": "0x8",
@@ -2976,8 +3661,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "Counter": "0",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request received from link layer while idle (with no slot 2 request active immediately prior)",
"UMask": "0x1",
@@ -2985,8 +3672,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "Counter": "0",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request withdrawn during hold-off period or service window",
"UMask": "0x2",
@@ -2994,16 +3683,20 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : All",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Needs Data Flit",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Needs Data Flit : BL message requires data flit sequence",
"UMask": "0x2",
@@ -3011,8 +3704,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0 : Waiting for header pump 0",
"UMask": "0x4",
@@ -3020,8 +3715,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 : Header pump 1 is not required for flit",
"UMask": "0x10",
@@ -3029,8 +3726,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble : Header pump 1 is not required for flit but flit transmission delayed",
"UMask": "0x20",
@@ -3038,8 +3737,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail : Header pump 1 is not required for flit and not available",
"UMask": "0x40",
@@ -3047,8 +3748,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1 : Waiting for header pump 1",
"UMask": "0x8",
@@ -3056,8 +3759,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate : Events related to Header Flit Generation - Set 1 : Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
"UMask": "0x1",
@@ -3065,8 +3770,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate Ready",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate Ready : Events related to Header Flit Generation - Set 1 : header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
"UMask": "0x2",
@@ -3074,8 +3781,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate Wasted",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate Wasted : Events related to Header Flit Generation - Set 1 : Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
"UMask": "0x4",
@@ -3083,8 +3792,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked : Events related to Header Flit Generation - Set 1 : Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
"UMask": "0x8",
@@ -3092,8 +3803,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_AFTER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: message was slotted only after run-ahead was over; run-ahead mode definitely wasted",
"UMask": "0x80",
@@ -3101,8 +3814,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Message",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_DURING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Message : Events related to Header Flit Generation - Set 1 : run-ahead mode: one message slotted during run-ahead",
"UMask": "0x10",
@@ -3110,8 +3825,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_AFTER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: second message slotted immediately after run-ahead; potential run-ahead success",
"UMask": "0x20",
@@ -3119,8 +3836,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: two (or three) message flit sent immediately after run-ahead; complete run-ahead success",
"UMask": "0x40",
@@ -3128,8 +3847,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Ok",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Ok : Events related to Header Flit Generation - Set 2 : new header flit construction may proceed in parallel with data flit sequence",
"UMask": "0x4",
@@ -3137,8 +3858,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Flit Finished",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Flit Finished : Events related to Header Flit Generation - Set 2 : header flit finished assembly in parallel with data flit sequence",
"UMask": "0x10",
@@ -3146,8 +3869,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Message",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Message : Events related to Header Flit Generation - Set 2 : message is slotted into header flit in parallel with data flit sequence",
"UMask": "0x8",
@@ -3155,8 +3880,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall : Events related to Header Flit Generation - Set 2 : Rate-matching stall injected",
"UMask": "0x1",
@@ -3164,8 +3891,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message : Events related to Header Flit Generation - Set 2 : Rate matching stall injected, but no additional message slotted during stall cycle",
"UMask": "0x2",
@@ -3173,8 +3902,10 @@
},
{
"BriefDescription": "Sent Header Flit : One Message",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : One Message : One message in flit; VNA or non-VNA flit",
"UMask": "0x1",
@@ -3182,8 +3913,10 @@
},
{
"BriefDescription": "Sent Header Flit : One Message in non-VNA",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG_VNX",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : One Message in non-VNA : One message in flit; non-VNA flit",
"UMask": "0x8",
@@ -3191,8 +3924,10 @@
},
{
"BriefDescription": "Sent Header Flit : Two Messages",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.2_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : Two Messages : Two messages in flit; VNA flit",
"UMask": "0x2",
@@ -3200,8 +3935,10 @@
},
{
"BriefDescription": "Sent Header Flit : Three Messages",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.3_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : Three Messages : Three messages in flit; VNA flit",
"UMask": "0x4",
@@ -3209,32 +3946,40 @@
},
{
"BriefDescription": "Sent Header Flit : One Slot Taken",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit : Two Slots Taken",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit : All Slots Taken",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "Header Not Sent : All",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : All : header flit is ready for transmission but could not be sent : header flit is ready for transmission but could not be sent for any reason, e.g. no credits, low tsv, stall injection",
"UMask": "0x1",
@@ -3242,8 +3987,10 @@
},
{
"BriefDescription": "Header Not Sent : No BGF Credits",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No BGF Credits : header flit is ready for transmission but could not be sent : No BGF credits available",
"UMask": "0x8",
@@ -3251,8 +3998,10 @@
},
{
"BriefDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No BGF credits available; no additional message slotted into flit",
"UMask": "0x20",
@@ -3260,8 +4009,10 @@
},
{
"BriefDescription": "Header Not Sent : No TxQ Credits",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No TxQ Credits : header flit is ready for transmission but could not be sent : No TxQ credits available",
"UMask": "0x10",
@@ -3269,8 +4020,10 @@
},
{
"BriefDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No TxQ credits available; no additional message slotted into flit",
"UMask": "0x40",
@@ -3278,8 +4031,10 @@
},
{
"BriefDescription": "Header Not Sent : TSV High",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.TSV_HI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : TSV High : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while tsv high",
"UMask": "0x2",
@@ -3287,8 +4042,10 @@
},
{
"BriefDescription": "Header Not Sent : Cycle valid for Flit",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.VALID_FOR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : Cycle valid for Flit : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while cycle is valid for flit transmission",
"UMask": "0x4",
@@ -3296,8 +4053,10 @@
},
{
"BriefDescription": "Message Held : Can't Slot AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Can't Slot AD : some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x10",
@@ -3305,8 +4064,10 @@
},
{
"BriefDescription": "Message Held : Can't Slot BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Can't Slot BL : some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x20",
@@ -3314,8 +4075,10 @@
},
{
"BriefDescription": "Message Held : Parallel Attempt",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Parallel Attempt : ad and bl messages attempted to slot into the same flit in parallel",
"UMask": "0x4",
@@ -3323,8 +4086,10 @@
},
{
"BriefDescription": "Message Held : Parallel Success",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
@@ -3332,8 +4097,10 @@
},
{
"BriefDescription": "Message Held : VN0",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : VN0 : vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
"UMask": "0x1",
@@ -3341,8 +4108,10 @@
},
{
"BriefDescription": "Message Held : VN1",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : VN1 : vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
"UMask": "0x2",
@@ -3350,8 +4119,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -3359,8 +4130,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -3368,8 +4141,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -3377,8 +4152,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -3386,8 +4163,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -3395,8 +4174,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -3404,8 +4185,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -3413,8 +4196,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -3422,8 +4207,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -3431,8 +4218,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -3440,8 +4229,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -3449,8 +4240,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -3458,8 +4251,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -3467,8 +4262,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -3476,8 +4273,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Any In Use",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Any In Use : At least one remote vna credit is in use",
"UMask": "0x20",
@@ -3485,8 +4284,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Corrected",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Corrected : Number of remote vna credits corrected (local return) per cycle",
"UMask": "0x1",
@@ -3494,8 +4295,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 1",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 1 : Remote vna credit level is less than 1 (i.e. no vna credits available)",
"UMask": "0x2",
@@ -3503,8 +4306,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 10",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 10 : remote vna credit level is less than 10; parallel vn0/vn1 arb not possible",
"UMask": "0x10",
@@ -3512,8 +4317,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 4",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 4 : Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
"UMask": "0x4",
@@ -3521,8 +4328,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 5",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 5 : Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
"UMask": "0x8",
@@ -3530,8 +4339,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credit count was less than 5 and allocation to ad or bl messages was required",
"UMask": "0x2",
@@ -3539,8 +4350,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credit count was less than 10 and allocation to vn0 or vn1 was required",
"UMask": "0x1",
@@ -3548,8 +4361,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn0, remote vna credits were allocated only to ad messages, not to bl",
"UMask": "0x10",
@@ -3557,8 +4372,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn0, remote vna credits were allocated only to bl messages, not to ad",
"UMask": "0x20",
@@ -3566,8 +4383,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credits were allocated only to vn0, not to vn1",
"UMask": "0x4",
@@ -3575,8 +4394,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn1, remote vna credits were allocated only to ad messages, not to bl",
"UMask": "0x40",
@@ -3584,8 +4405,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn1, remote vna credits were allocated only to bl messages, not to ad",
"UMask": "0x80",
@@ -3593,8 +4416,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credits were allocated only to vn1, not to vn0",
"UMask": "0x8",
@@ -3602,8 +4427,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 REQ Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -3611,8 +4438,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 RSP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -3620,8 +4449,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 SNP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -3629,8 +4460,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 WB Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -3638,8 +4471,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 REQ Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -3647,8 +4482,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 RSP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -3656,8 +4493,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 SNP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -3665,8 +4504,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 WB Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -3674,8 +4515,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3684,8 +4527,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x1",
@@ -3693,8 +4538,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x2",
@@ -3702,8 +4549,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x4",
@@ -3711,8 +4560,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x8",
@@ -3720,8 +4571,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x1",
@@ -3729,8 +4582,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x4",
@@ -3738,8 +4593,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x2",
@@ -3747,8 +4604,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 WB Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x8",
@@ -3756,8 +4615,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x10",
@@ -3765,8 +4626,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x40",
@@ -3774,8 +4637,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x20",
@@ -3783,8 +4648,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 WB Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x80",
@@ -3792,8 +4659,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -3801,8 +4670,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -3810,8 +4681,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -3819,8 +4692,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -3828,8 +4703,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -3837,8 +4714,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -3846,8 +4725,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -3855,78 +4736,98 @@
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "AK Flow Q Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AK Flow Q Occupancy",
+ "Counter": "0",
"EventCode": "0x1e",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Failed ARB for BL : VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 NCB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -3934,8 +4835,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 NCS Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 NCS Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -3943,8 +4846,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 RSP Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -3952,8 +4857,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 WB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -3961,8 +4868,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 NCS Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 NCS Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -3970,8 +4879,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 NCB Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 NCB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -3979,8 +4890,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 RSP Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -3988,8 +4901,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 WB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -3997,8 +4912,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x1",
@@ -4006,8 +4923,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x4",
@@ -4015,8 +4934,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x2",
@@ -4024,8 +4945,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 WB Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x8",
@@ -4033,8 +4956,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x10",
@@ -4042,8 +4967,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x40",
@@ -4051,8 +4978,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x20",
@@ -4060,8 +4989,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 WB Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x80",
@@ -4069,8 +5000,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -4078,8 +5011,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -4087,8 +5022,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 NCS Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -4096,8 +5033,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 NCB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -4105,8 +5044,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -4114,8 +5055,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -4123,8 +5066,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1_NCB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1_NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x80",
@@ -4132,8 +5077,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1_NCS Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1_NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -4141,120 +5088,150 @@
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCS Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCB Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_THROUGH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_WRPULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_THROUGH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_WRPULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 REQ Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x2",
@@ -4262,8 +5239,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 RSP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x8",
@@ -4271,8 +5250,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 SNP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x4",
@@ -4280,8 +5261,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 REQ Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x10",
@@ -4289,8 +5272,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 RSP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x40",
@@ -4298,8 +5283,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 SNP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x20",
@@ -4307,8 +5294,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VNA : No credits available to send to UPIs on the AD Ring",
"UMask": "0x1",
@@ -4316,8 +5305,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x4",
@@ -4325,8 +5316,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x2",
@@ -4334,8 +5327,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x8",
@@ -4343,8 +5338,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x20",
@@ -4352,8 +5349,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x10",
@@ -4361,8 +5360,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x40",
@@ -4370,8 +5371,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VNA",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VNA : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x1",
@@ -4379,16 +5382,20 @@
},
{
"BriefDescription": "FlowQ Generated Prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "FlowQ Generated Prefetch : Count cases where FlowQ causes spawn of Prefetch to iMC/SMI3 target",
"Unit": "M3UPI"
},
{
"BriefDescription": "VN0 Credit Used : WB on BL",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : WB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -4396,8 +5403,10 @@
},
{
"BriefDescription": "VN0 Credit Used : NCB on BL",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : NCB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -4405,8 +5414,10 @@
},
{
"BriefDescription": "VN0 Credit Used : REQ on AD",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : REQ on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -4414,8 +5425,10 @@
},
{
"BriefDescription": "VN0 Credit Used : RSP on AD",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : RSP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -4423,8 +5436,10 @@
},
{
"BriefDescription": "VN0 Credit Used : SNP on AD",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : SNP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -4432,8 +5447,10 @@
},
{
"BriefDescription": "VN0 Credit Used : RSP on BL",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : RSP on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -4441,8 +5458,10 @@
},
{
"BriefDescription": "VN0 No Credits : WB on BL",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : WB on BL : Number of Cycles there were no VN0 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -4450,8 +5469,10 @@
},
{
"BriefDescription": "VN0 No Credits : NCB on BL",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : NCB on BL : Number of Cycles there were no VN0 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -4459,8 +5480,10 @@
},
{
"BriefDescription": "VN0 No Credits : REQ on AD",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : REQ on AD : Number of Cycles there were no VN0 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -4468,8 +5491,10 @@
},
{
"BriefDescription": "VN0 No Credits : RSP on AD",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : RSP on AD : Number of Cycles there were no VN0 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -4477,8 +5502,10 @@
},
{
"BriefDescription": "VN0 No Credits : SNP on AD",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : SNP on AD : Number of Cycles there were no VN0 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -4486,8 +5513,10 @@
},
{
"BriefDescription": "VN0 No Credits : RSP on BL",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : RSP on BL : Number of Cycles there were no VN0 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -4495,8 +5524,10 @@
},
{
"BriefDescription": "VN1 Credit Used : WB on BL",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : WB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -4504,8 +5535,10 @@
},
{
"BriefDescription": "VN1 Credit Used : NCB on BL",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : NCB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -4513,8 +5546,10 @@
},
{
"BriefDescription": "VN1 Credit Used : REQ on AD",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : REQ on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -4522,8 +5557,10 @@
},
{
"BriefDescription": "VN1 Credit Used : RSP on AD",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : RSP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -4531,8 +5568,10 @@
},
{
"BriefDescription": "VN1 Credit Used : SNP on AD",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : SNP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -4540,8 +5579,10 @@
},
{
"BriefDescription": "VN1 Credit Used : RSP on BL",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : RSP on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -4549,8 +5590,10 @@
},
{
"BriefDescription": "VN1 No Credits : WB on BL",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : WB on BL : Number of Cycles there were no VN1 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -4558,8 +5601,10 @@
},
{
"BriefDescription": "VN1 No Credits : NCB on BL",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : NCB on BL : Number of Cycles there were no VN1 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -4567,8 +5612,10 @@
},
{
"BriefDescription": "VN1 No Credits : REQ on AD",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : REQ on AD : Number of Cycles there were no VN1 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -4576,8 +5623,10 @@
},
{
"BriefDescription": "VN1 No Credits : RSP on AD",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : RSP on AD : Number of Cycles there were no VN1 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -4585,8 +5634,10 @@
},
{
"BriefDescription": "VN1 No Credits : SNP on AD",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : SNP on AD : Number of Cycles there were no VN1 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -4594,8 +5645,10 @@
},
{
"BriefDescription": "VN1 No Credits : RSP on BL",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : RSP on BL : Number of Cycles there were no VN1 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -4603,168 +5656,210 @@
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x82",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x81",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x84",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc0",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message is making arbitration request",
"UMask": "0x4",
@@ -4772,8 +5867,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message arrived in ingress pipeline",
"UMask": "0x1",
@@ -4781,8 +5878,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message took bypass path",
"UMask": "0x2",
@@ -4790,8 +5889,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was slotted into flit (non bypass)",
"UMask": "0x10",
@@ -4799,8 +5900,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message lost arbitration",
"UMask": "0x8",
@@ -4808,8 +5911,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was dropped because it became too old",
"UMask": "0x20",
@@ -4817,8 +5922,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was dropped because it was overwritten by new message while prefetch queue was full",
"UMask": "0x40",
@@ -4826,8 +5933,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bounceable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Bounceable : Number of allocations into the CRS Egress",
"UMask": "0x1",
@@ -4835,8 +5944,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD credited)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD credited : Number of allocations into the CRS Egress",
"UMask": "0x2",
@@ -4844,8 +5955,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AK)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AK : Number of allocations into the CRS Egress",
"UMask": "0x10",
@@ -4853,8 +5966,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AKC)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AKC : Number of allocations into the CRS Egress",
"UMask": "0x40",
@@ -4862,8 +5977,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bounceable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Bounceable : Number of allocations into the CRS Egress",
"UMask": "0x4",
@@ -4871,8 +5988,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL credited)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL credited : Number of allocations into the CRS Egress",
"UMask": "0x8",
@@ -4880,8 +5999,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (IV)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IV : Number of allocations into the CRS Egress",
"UMask": "0x20",
@@ -4889,8 +6010,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AD)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x1",
@@ -4898,8 +6021,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AK)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AK : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x4",
@@ -4907,8 +6032,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AKC)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AKC : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x10",
@@ -4916,8 +6043,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (BL)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x2",
@@ -4925,8 +6054,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (IV)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IV : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x8",
@@ -4934,8 +6065,10 @@
},
{
"BriefDescription": "Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_MDF_FAST_ASSERTED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD bnc : Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
"UMask": "0x1",
@@ -4943,8 +6076,10 @@
},
{
"BriefDescription": "Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_MDF_FAST_ASSERTED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL bnc : Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
"UMask": "0x2",
@@ -4952,6 +6087,7 @@
},
{
"BriefDescription": "UPI Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -4960,8 +6096,10 @@
},
{
"BriefDescription": "Direct packet attempts : D2C",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Direct packet attempts : D2C : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
"UMask": "0x1",
@@ -4969,8 +6107,10 @@
},
{
"BriefDescription": "Direct packet attempts : D2K",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Direct packet attempts : D2K : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
"UMask": "0x2",
@@ -4978,70 +6118,87 @@
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_UPI_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -5050,246 +6207,308 @@
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req Nack",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_UPI_POWER_L1_NACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "L1 Req Nack : Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req (same as L1 Ack).",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_UPI_POWER_L1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "L1 Req (same as L1 Ack). : Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.DATA",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.LLCRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.NULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xe",
@@ -5297,8 +6516,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10e",
@@ -5306,8 +6527,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xf",
@@ -5315,8 +6538,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10f",
@@ -5324,8 +6549,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 0 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x1",
@@ -5333,8 +6560,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 1 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x2",
@@ -5342,8 +6571,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 2 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x4",
@@ -5351,40 +6582,50 @@
},
{
"BriefDescription": "CRC Errors Detected",
+ "Counter": "0,1,2,3",
"EventCode": "0x0b",
"EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CRC Errors Detected : Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
"Unit": "UPI"
},
{
"BriefDescription": "LLR Requests Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "LLR Requests Sent : Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs..",
"Unit": "UPI"
},
{
"BriefDescription": "VN0 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Consumed : Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VN1 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x3a",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Consumed : Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -5393,6 +6634,7 @@
},
{
"BriefDescription": "Valid Flits Received : All Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -5402,6 +6644,7 @@
},
{
"BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -5410,8 +6653,10 @@
},
{
"BriefDescription": "Valid Flits Received : Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
"UMask": "0x8",
@@ -5419,8 +6664,10 @@
},
{
"BriefDescription": "Valid Flits Received : Idle",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Idle : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -5428,8 +6675,10 @@
},
{
"BriefDescription": "Valid Flits Received : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -5437,8 +6686,10 @@
},
{
"BriefDescription": "Valid Flits Received : LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -5446,6 +6697,7 @@
},
{
"BriefDescription": "Valid Flits Received : All Non Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -5455,8 +6707,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
"UMask": "0x20",
@@ -5464,8 +6718,10 @@
},
{
"BriefDescription": "Valid Flits Received : Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -5473,8 +6729,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -5482,8 +6740,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -5491,8 +6751,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -5500,8 +6762,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x1",
@@ -5509,8 +6773,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x2",
@@ -5518,8 +6784,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x4",
@@ -5527,8 +6795,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 0 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x1",
@@ -5536,8 +6806,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 1 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x2",
@@ -5545,8 +6817,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 2 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x4",
@@ -5554,214 +6828,268 @@
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.DATA",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.LLCRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.NULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xe",
@@ -5769,8 +7097,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10e",
@@ -5778,8 +7108,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xf",
@@ -5787,8 +7119,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10f",
@@ -5796,14 +7130,17 @@
},
{
"BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_UPI_TxL_BYPASSED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Bypassed : Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the UPI Link. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : All Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -5813,8 +7150,10 @@
},
{
"BriefDescription": "Valid Flits Sent : All LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : All Data : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x17",
@@ -5822,8 +7161,10 @@
},
{
"BriefDescription": "Valid Flits Sent : All LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : All LLCTRL : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -5831,6 +7172,7 @@
},
{
"BriefDescription": "All Null Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -5839,8 +7181,10 @@
},
{
"BriefDescription": "Valid Flits Sent : All Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : All ProtDDR : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x87",
@@ -5848,8 +7192,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
"UMask": "0x8",
@@ -5857,8 +7203,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Idle",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -5866,8 +7214,10 @@
},
{
"BriefDescription": "Valid Flits Sent : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -5875,8 +7225,10 @@
},
{
"BriefDescription": "Valid Flits Sent : LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -5884,6 +7236,7 @@
},
{
"BriefDescription": "Valid Flits Sent : All Non Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -5893,8 +7246,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
"UMask": "0x20",
@@ -5902,8 +7257,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -5911,8 +7268,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -5920,8 +7279,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -5929,8 +7290,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -5938,47 +7301,59 @@
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_UPI_TxL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VNA Credits Pending Return - Occupancy : Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
"Unit": "UPI"
},
{
"BriefDescription": "Message Received : Doorbell",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "Message Received : Interrupt",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : Interrupt : Interrupts",
"UMask": "0x10",
@@ -5986,8 +7361,10 @@
},
{
"BriefDescription": "Message Received : IPI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
"UMask": "0x4",
@@ -5995,8 +7372,10 @@
},
{
"BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
"UMask": "0x2",
@@ -6004,8 +7383,10 @@
},
{
"BriefDescription": "Message Received : VLW",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x1",
@@ -6013,152 +7394,190 @@
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Counter": "0",
"EventCode": "0x4f",
"EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Counter": "0",
"EventCode": "0x4f",
"EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles PHOLD Assert to Ack : Assert to ACK : PHOLD cycles.",
"UMask": "0x1",
@@ -6166,32 +7585,40 @@
},
{
"BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RACU Request : Number outstanding register requests within message channel tracker",
"Unit": "UBOX"
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
index 0761980c34a0..91013ced74aa 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
@@ -1,70 +1,167 @@
[
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "1",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "2",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "3",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x22",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x23",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "5",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "6",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x25",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "7",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x26",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "8",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x27",
"Unit": "iio_free_running"
},
{
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "9",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART0_FREERUN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "10",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART1_FREERUN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "11",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART2_FREERUN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x32",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "12",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART3_FREERUN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x33",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "13",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART4_FREERUN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "14",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART5_FREERUN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x35",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "15",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART6_FREERUN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x36",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "16",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART7_FREERUN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "iio_free_running"
+ },
+ {
"BriefDescription": "IIO Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_IIO_CLOCKTICKS",
"PerPkg": "1",
@@ -74,6 +171,7 @@
},
{
"BriefDescription": "Free running counter that increments for IIO clocktick",
+ "Counter": "0",
"EventCode": "0xff",
"EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
"PerPkg": "1",
@@ -82,8 +180,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xff",
@@ -93,8 +193,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -104,8 +206,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -115,8 +219,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -126,8 +232,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -137,8 +245,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -148,8 +258,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -159,8 +271,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -170,8 +284,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -181,8 +297,10 @@
},
{
"BriefDescription": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"UMask": "0xff",
@@ -190,8 +308,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 0",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -201,8 +321,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 1",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -212,8 +334,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 2",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -223,8 +347,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 3",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -234,8 +360,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 4",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -245,8 +373,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 5",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -256,8 +386,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 6",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -267,8 +399,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 7",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -278,8 +412,10 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0-7",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00ff",
@@ -288,6 +424,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -299,6 +436,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -310,6 +448,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -321,6 +460,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -332,6 +472,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -343,6 +484,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -354,6 +496,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -365,6 +508,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -376,8 +520,10 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part0-7 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00ff",
@@ -386,8 +532,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0100",
@@ -397,8 +545,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0200",
@@ -408,6 +558,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -419,6 +570,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -430,6 +582,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -441,6 +594,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -452,6 +606,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -463,6 +618,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -474,6 +630,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -485,6 +642,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -496,8 +654,10 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -507,8 +667,10 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -518,8 +680,10 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -529,8 +693,10 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -540,8 +706,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -551,8 +719,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -562,8 +732,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -573,8 +745,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -584,8 +758,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -595,8 +771,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -606,8 +784,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -617,8 +797,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -628,8 +810,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -639,8 +823,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -650,8 +836,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -661,8 +849,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -672,8 +862,10 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xff",
@@ -683,6 +875,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
"FCMask": "0x07",
@@ -694,6 +887,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
"FCMask": "0x07",
@@ -705,6 +899,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
"FCMask": "0x07",
@@ -716,6 +911,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
"FCMask": "0x07",
@@ -727,6 +923,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
"FCMask": "0x07",
@@ -738,6 +935,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
"FCMask": "0x07",
@@ -749,6 +947,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
"FCMask": "0x07",
@@ -760,6 +959,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
"FCMask": "0x07",
@@ -771,8 +971,10 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part0-7 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00ff",
@@ -781,6 +983,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part0 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -792,6 +995,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part1 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -803,6 +1007,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part2 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -814,6 +1019,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part3 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -825,6 +1031,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -836,6 +1043,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -847,6 +1055,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -858,6 +1067,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -869,8 +1079,10 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part0-7 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00ff",
@@ -879,6 +1091,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part0 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -890,6 +1103,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part1 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -901,6 +1115,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part2 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -912,6 +1127,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part3 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -923,6 +1139,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -934,6 +1151,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -945,6 +1163,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -956,6 +1175,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -967,8 +1187,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -978,8 +1200,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -989,8 +1213,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -1000,8 +1226,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -1011,8 +1239,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -1022,8 +1252,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -1033,8 +1265,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -1044,8 +1278,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -1055,8 +1291,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1066,8 +1304,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1077,8 +1317,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1088,8 +1330,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1099,8 +1343,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1110,8 +1356,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1121,8 +1369,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1132,8 +1382,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1143,8 +1395,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1154,8 +1408,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1165,8 +1421,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1176,8 +1434,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1187,8 +1447,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 1G Page",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
@@ -1197,8 +1459,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 2M Page",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
@@ -1207,8 +1471,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 4K Page",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
@@ -1217,8 +1483,10 @@
},
{
"BriefDescription": ": Context cache hits",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": Context cache hits : Counts each time a first look up of the transaction hits the RCC.",
@@ -1227,8 +1495,10 @@
},
{
"BriefDescription": ": Context cache lookups",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": Context cache lookups : Counts each time a transaction looks up root context cache.",
@@ -1237,8 +1507,10 @@
},
{
"BriefDescription": ": IOTLB lookups first",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": IOTLB lookups first : Some transactions have to look up IOTLB multiple times. Counts the first time a request looks up IOTLB.",
@@ -1247,8 +1519,10 @@
},
{
"BriefDescription": "IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.MISSES",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "IOTLB Fills (same as IOTLB miss) : When a transaction misses IOTLB, it does a page walk to look up memory and bring in the relevant page translation. Counts when this page translation is written to IOTLB.",
@@ -1257,8 +1531,10 @@
},
{
"BriefDescription": ": IOMMU memory access",
+ "Counter": "0",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOMMU memory access : IOMMU sends out memory fetches when it misses the cache look up which is indicated by this signal. M2IOSF only uses low priority channel",
"UMask": "0xc0",
@@ -1266,8 +1542,10 @@
},
{
"BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
"UMask": "0x4",
@@ -1275,8 +1553,10 @@
},
{
"BriefDescription": ": PWT Hit to a 256T page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_256T_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWT Hit to a 256T page : Counts each time a transaction's first look up hits the SLPWC at the 512G level",
"UMask": "0x10",
@@ -1284,8 +1564,10 @@
},
{
"BriefDescription": ": PWC Hit to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 4K page : Counts each time a transaction's first look up hits the SLPWC at the 4K level",
"UMask": "0x2",
@@ -1293,8 +1575,10 @@
},
{
"BriefDescription": ": PWC Hit to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
"UMask": "0x8",
@@ -1302,8 +1586,10 @@
},
{
"BriefDescription": ": PageWalk cache fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PageWalk cache fill : When a transaction misses SLPWC, it does a page walk to look up memory and bring in the relevant page translation. When this page translation is written to SLPWC, ObsPwcFillValid_nnnH is asserted.",
"UMask": "0x20",
@@ -1311,8 +1597,10 @@
},
{
"BriefDescription": ": PageWalk cache lookup",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PageWalk cache lookup : Counts each time a transaction looks up second level page walk cache.",
"UMask": "0x1",
@@ -1320,8 +1608,10 @@
},
{
"BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
"UMask": "0x4",
@@ -1329,8 +1619,10 @@
},
{
"BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
"UMask": "0x10",
@@ -1338,8 +1630,10 @@
},
{
"BriefDescription": ": PWC Hit to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
"UMask": "0x8",
@@ -1347,8 +1641,10 @@
},
{
"BriefDescription": ": Global IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.PWT_OCCUPANCY_MSB",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": Global IOTLB invalidation cycles : Indicates that IOMMU is doing global invalidation.",
@@ -1357,8 +1653,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus : Asserted if all bits specified by mask match",
@@ -1367,8 +1665,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if all bits specified by mask match",
@@ -1377,8 +1677,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if all bits specified by mask match",
@@ -1387,8 +1689,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : PCIE bus : Asserted if all bits specified by mask match",
@@ -1397,8 +1701,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if all bits specified by mask match",
@@ -1407,8 +1713,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if all bits specified by mask match",
@@ -1417,8 +1725,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus : Asserted if any bits specified by mask match",
@@ -1427,8 +1737,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if any bits specified by mask match",
@@ -1437,8 +1749,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if any bits specified by mask match",
@@ -1447,8 +1761,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : PCIE bus : Asserted if any bits specified by mask match",
@@ -1457,8 +1773,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if any bits specified by mask match",
@@ -1467,8 +1785,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if any bits specified by mask match",
@@ -1477,6 +1797,7 @@
},
{
"BriefDescription": "Number requests PCIe makes of the main die : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
"FCMask": "0x07",
@@ -1488,8 +1809,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1498,8 +1821,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1508,8 +1833,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1518,8 +1845,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1528,8 +1857,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1538,8 +1869,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1548,8 +1881,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1558,8 +1893,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1568,8 +1905,10 @@
},
{
"BriefDescription": "ITC address map 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8f",
"EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
@@ -1577,8 +1916,10 @@
},
{
"BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1588,8 +1929,10 @@
},
{
"BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1599,8 +1942,10 @@
},
{
"BriefDescription": "PWT occupancy. Does not include 9th bit of occupancy (will undercount if PWT is greater than 255 per cycle).",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "PWT occupancy : Indicates how many page walks are outstanding at any point in time.",
@@ -1609,8 +1954,10 @@
},
{
"BriefDescription": "Request Ownership : PCIe Request complete",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1620,8 +1967,10 @@
},
{
"BriefDescription": "Request Ownership : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1631,8 +1980,10 @@
},
{
"BriefDescription": "Request Ownership : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1642,8 +1993,10 @@
},
{
"BriefDescription": "Request Ownership : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1653,8 +2006,10 @@
},
{
"BriefDescription": "Processing response from IOMMU : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1664,8 +2019,10 @@
},
{
"BriefDescription": "Processing response from IOMMU : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1674,8 +2031,10 @@
},
{
"BriefDescription": "Processing response from IOMMU : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1685,8 +2044,10 @@
},
{
"BriefDescription": "Processing response from IOMMU : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1696,8 +2057,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1707,8 +2070,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1718,8 +2083,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1729,8 +2096,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1740,6 +2109,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -1751,6 +2121,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -1762,6 +2133,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -1773,6 +2145,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -1784,6 +2157,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -1795,6 +2169,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -1806,6 +2181,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -1817,6 +2193,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -1828,6 +2205,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -1839,6 +2217,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -1850,6 +2229,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -1861,6 +2241,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -1872,6 +2253,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -1883,6 +2265,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -1894,6 +2277,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -1905,6 +2289,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -1916,8 +2301,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -1927,8 +2314,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -1938,8 +2327,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -1949,8 +2340,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -1960,8 +2353,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -1971,8 +2366,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -1982,8 +2379,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -1993,8 +2392,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -2004,6 +2405,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
"FCMask": "0x07",
@@ -2015,6 +2417,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
"FCMask": "0x07",
@@ -2026,6 +2429,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
"FCMask": "0x07",
@@ -2037,6 +2441,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
"FCMask": "0x07",
@@ -2048,6 +2453,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
"FCMask": "0x07",
@@ -2059,6 +2465,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
"FCMask": "0x07",
@@ -2070,6 +2477,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
"FCMask": "0x07",
@@ -2081,6 +2489,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
"FCMask": "0x07",
@@ -2092,6 +2501,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -2103,6 +2513,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -2114,6 +2525,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -2125,6 +2537,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -2136,6 +2549,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -2147,6 +2561,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -2158,6 +2573,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -2169,6 +2585,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -2180,6 +2597,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -2191,6 +2609,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -2202,6 +2621,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -2213,6 +2633,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -2224,6 +2645,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -2235,6 +2657,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -2246,6 +2669,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -2257,6 +2681,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -2268,8 +2693,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -2279,8 +2706,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -2290,8 +2719,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -2301,8 +2732,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -2312,8 +2745,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -2323,8 +2758,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -2334,8 +2771,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -2345,8 +2784,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -2356,6 +2797,7 @@
},
{
"BriefDescription": "M2P Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2P_CLOCKTICKS",
"PerPkg": "1",
@@ -2364,6 +2806,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2P_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -2371,8 +2814,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -2380,8 +2825,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -2389,8 +2836,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x1",
@@ -2398,8 +2847,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x2",
@@ -2407,8 +2858,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x4",
@@ -2416,8 +2869,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x8",
@@ -2425,8 +2880,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
"UMask": "0x10",
@@ -2434,8 +2891,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -2443,8 +2902,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the DRS message class.",
"UMask": "0x8",
@@ -2452,8 +2913,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCB message class.",
"UMask": "0x10",
@@ -2461,8 +2924,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -2470,8 +2935,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x1",
@@ -2479,8 +2946,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x2",
@@ -2488,8 +2957,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x4",
@@ -2497,8 +2968,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x8",
@@ -2506,8 +2979,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
"UMask": "0x10",
@@ -2515,8 +2990,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -2524,896 +3001,1120 @@
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : All",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x80",
@@ -3421,8 +4122,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x1",
@@ -3430,8 +4133,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x2",
@@ -3439,8 +4144,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x4",
@@ -3448,8 +4155,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x20",
@@ -3457,8 +4166,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x40",
@@ -3466,8 +4177,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x8",
@@ -3475,8 +4188,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x10",
@@ -3484,8 +4199,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x80",
@@ -3493,8 +4210,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x1",
@@ -3502,8 +4221,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x2",
@@ -3511,8 +4232,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x4",
@@ -3520,8 +4243,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x20",
@@ -3529,8 +4254,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x40",
@@ -3538,8 +4265,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x8",
@@ -3547,8 +4276,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x10",
@@ -3556,24 +4287,30 @@
},
{
"BriefDescription": "UNC_M2P_TxC_CREDITS.PMM",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_M2P_TxC_CREDITS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_0",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3583,8 +4320,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_1",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3594,8 +4333,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_0",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3605,8 +4346,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_1",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json
index 3ff9e9b722c8..aa06088dd26f 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles - at UCLK",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2HBM_CLOCKTICKS",
"PerPkg": "1",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2HBM_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -15,16 +17,20 @@
},
{
"BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2HBM"
},
{
"BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of time non cisgress D2C was not honoured by egress due to directory state constraints",
"UMask": "0x2",
@@ -32,47 +38,59 @@
},
{
"BriefDescription": "Counts the time when FM didn't do d2c for fill reads (cross tile case)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2HBM_DIRECT2CORE_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number of reads in which direct to core transaction was overridden : Cisgress",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2HBM_DIRECT2CORE_TXN_OVERRIDE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2HBM"
},
{
"BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2HBM"
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.CISGRESS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -82,8 +100,10 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -93,8 +113,10 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -104,86 +126,107 @@
},
{
"BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_M2HBM_DIRECT2UPI_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_M2HBM_DIRECT2UPI_TXN_OVERRIDE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -193,6 +236,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -202,6 +246,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -211,6 +256,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -220,86 +266,107 @@
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x320",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x340",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -308,8 +375,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_I_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -319,8 +388,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_I_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -330,8 +401,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_S_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -341,8 +414,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_S_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -352,8 +427,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -363,24 +440,30 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x304",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x302",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_A_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -390,8 +473,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_A_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -401,8 +486,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_S_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -412,8 +499,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_S_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -423,8 +512,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -434,24 +525,30 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_A_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -461,8 +558,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_A_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -472,8 +571,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_I_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -483,8 +584,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_I_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -494,64 +597,80 @@
},
{
"BriefDescription": "Count distress signalled on AkAd cmp message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on any packet type",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on Bl Cmp message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.BL_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on NM fill write message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.CROSSTILE_NMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on D2Cha message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.D2CHA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on D2c message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.D2CORE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on D2k message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.D2UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2HBM_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x80000004",
@@ -559,8 +678,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2HBM_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x80000001",
@@ -568,8 +689,10 @@
},
{
"BriefDescription": "Count when Starve Glocab counter is at 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2HBM_IGR_STARVE_WINNER.MASK7",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -578,32 +701,40 @@
},
{
"BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x304",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0.ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x104",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0.NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x101",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -612,24 +743,30 @@
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0_FROM_TGR",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x140",
"Unit": "M2HBM"
},
{
"BriefDescription": "Critical Priority - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x102",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0_NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0_NORMAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -638,24 +775,30 @@
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH1.ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x204",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH1.NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x201",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH1_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -664,24 +807,30 @@
},
{
"BriefDescription": "From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x240",
"Unit": "M2HBM"
},
{
"BriefDescription": "Critical Priority - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x202",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH1_NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1_NORMAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -690,64 +839,80 @@
},
{
"BriefDescription": "From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x340",
"Unit": "M2HBM"
},
{
"BriefDescription": "Critical Priority - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x302",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x301",
"Unit": "M2HBM"
},
{
"BriefDescription": "All Writes - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1810",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x810",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.FULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x801",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.PARTIAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x802",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -756,15 +921,19 @@
},
{
"BriefDescription": "From TGR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_FULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_FULL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -773,16 +942,20 @@
},
{
"BriefDescription": "ISOCH Full Line - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x804",
"Unit": "M2HBM"
},
{
"BriefDescription": "Non-Inclusive - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_NI",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -790,8 +963,10 @@
},
{
"BriefDescription": "Non-Inclusive Miss - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_NI_MISS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -799,8 +974,10 @@
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -809,40 +986,50 @@
},
{
"BriefDescription": "ISOCH Partial - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x808",
"Unit": "M2HBM"
},
{
"BriefDescription": "All Writes - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1010",
"Unit": "M2HBM"
},
{
"BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1001",
"Unit": "M2HBM"
},
{
"BriefDescription": "Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1002",
"Unit": "M2HBM"
},
{
"BriefDescription": "All Writes - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -851,15 +1038,19 @@
},
{
"BriefDescription": "From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_FULL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -868,16 +1059,20 @@
},
{
"BriefDescription": "ISOCH Full Line - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1004",
"Unit": "M2HBM"
},
{
"BriefDescription": "Non-Inclusive - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_NI",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -885,8 +1080,10 @@
},
{
"BriefDescription": "Non-Inclusive Miss - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_NI_MISS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -894,8 +1091,10 @@
},
{
"BriefDescription": "Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_PARTIAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -904,39 +1103,49 @@
},
{
"BriefDescription": "ISOCH Partial - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1008",
"Unit": "M2HBM"
},
{
"BriefDescription": "From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Full Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1801",
"Unit": "M2HBM"
},
{
"BriefDescription": "ISOCH Full Line - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1804",
"Unit": "M2HBM"
},
{
"BriefDescription": "Non-Inclusive - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.NI",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -944,8 +1153,10 @@
},
{
"BriefDescription": "Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.NI_MISS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -953,159 +1164,199 @@
},
{
"BriefDescription": "Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1802",
"Unit": "M2HBM"
},
{
"BriefDescription": "ISOCH Partial - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1808",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_CIS_DROPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_M2HBM_PREFCAM_CIS_DROPS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "M2HBM"
},
{
"BriefDescription": ": UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2HBM"
},
{
"BriefDescription": ": XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "M2HBM"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.RD_MERGED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2HBM"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.WR_MERGED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2HBM"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.WR_SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Prefetch CAM Inserts : XPT -All Channels",
"UMask": "0x5",
@@ -1113,80 +1364,100 @@
},
{
"BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": ": Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": ": Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.CIS",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.CIS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_RxC_OCCUPANCY",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2HBM_PREFCAM_RxC_OCCUPANCY",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1194,8 +1465,10 @@
},
{
"BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M2HBM_RxC_AD.INSERTS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1204,23 +1477,29 @@
},
{
"BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M2HBM_RxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M2HBM_RxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "BL Ingress (from CMS) : BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M2HBM_RxC_BL.INSERTS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1230,8 +1509,10 @@
},
{
"BriefDescription": "BL Ingress (from CMS) : BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M2HBM_RxC_BL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts anytime a BL packet is added to Ingress",
"UMask": "0x1",
@@ -1239,61 +1520,77 @@
},
{
"BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M2HBM_RxC_BL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M2HBM_TGR_AD_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_M2HBM_TGR_BL_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2HBM_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x104",
"Unit": "M2HBM"
},
{
"BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2HBM_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x204",
"Unit": "M2HBM"
},
{
"BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2HBM_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2HBM_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "AD Egress (to CMS) : AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M2HBM_TxC_AD.INSERTS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1303,8 +1600,10 @@
},
{
"BriefDescription": "AD Egress (to CMS) : AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M2HBM_TxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts anytime a AD packet is added to Egress",
"UMask": "0x1",
@@ -1312,15 +1611,19 @@
},
{
"BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "UNC_M2HBM_TxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "BL Egress (to CMS) : Inserts - CMS0 - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UNC_M2HBM_TxC_BL.INSERTS_CMS0",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1330,8 +1633,10 @@
},
{
"BriefDescription": "BL Egress (to CMS) : Inserts - CMS1 - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UNC_M2HBM_TxC_BL.INSERTS_CMS1",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1341,160 +1646,200 @@
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x0f",
"EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x0f",
"EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x0f",
"EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "WPQ Flush : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2HBM_WPQ_FLUSH.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "WPQ Flush : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2HBM_WPQ_FLUSH.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2HBM_WPQ_NO_REG_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2HBM_WPQ_NO_REG_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2HBM_WPQ_NO_SPEC_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2HBM_WPQ_NO_SPEC_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2HBM_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2HBM_WR_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2HBM_WR_TRACKER_POSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2HBM_WR_TRACKER_POSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2HBM_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2HBM_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0xff",
@@ -1502,8 +1847,10 @@
},
{
"BriefDescription": "Activate due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x11",
@@ -1511,8 +1858,10 @@
},
{
"BriefDescription": "HBM Activate Count : Activate due to Read in PCH0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.RD_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x1",
@@ -1520,8 +1869,10 @@
},
{
"BriefDescription": "HBM Activate Count : Activate due to Read in PCH1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.RD_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x10",
@@ -1529,8 +1880,10 @@
},
{
"BriefDescription": "HBM Activate Count : Underfill Read transaction on Page Empty or Page Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x44",
@@ -1538,8 +1891,10 @@
},
{
"BriefDescription": "HBM Activate Count",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.UFILL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x4",
@@ -1547,8 +1902,10 @@
},
{
"BriefDescription": "HBM Activate Count",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.UFILL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x40",
@@ -1556,8 +1913,10 @@
},
{
"BriefDescription": "Activate due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x22",
@@ -1565,8 +1924,10 @@
},
{
"BriefDescription": "HBM Activate Count : Activate due to Write in PCH0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.WR_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x2",
@@ -1574,8 +1935,10 @@
},
{
"BriefDescription": "HBM Activate Count : Activate due to Write in PCH1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.WR_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x20",
@@ -1583,16 +1946,20 @@
},
{
"BriefDescription": "All CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xff",
"Unit": "MCHBM"
},
{
"BriefDescription": "Pseudo Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "HBM RD_CAS and WR_CAS Commands",
"UMask": "0x40",
@@ -1600,8 +1967,10 @@
},
{
"BriefDescription": "Pseudo Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "HBM RD_CAS and WR_CAS Commands",
"UMask": "0x80",
@@ -1609,134 +1978,167 @@
},
{
"BriefDescription": "Read CAS commands issued (regular and underfill)",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xcf",
"Unit": "MCHBM"
},
{
"BriefDescription": "Regular read CAS commands with precharge",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD_PRE_REG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc2",
"Unit": "MCHBM"
},
{
"BriefDescription": "Underfill read CAS commands with precharge",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD_PRE_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8",
"Unit": "MCHBM"
},
{
"BriefDescription": "Regular read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD_REG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc4",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf0",
"Unit": "MCHBM"
},
{
"BriefDescription": "HBM RD_CAS and WR_CAS Commands. : HBM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write CAS commands with precharge",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "MCHBM"
},
{
"BriefDescription": "Pseudo Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "MCHBM"
},
{
"BriefDescription": "Pseudo Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Underfill Read CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_UFILL_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "MCHBM"
},
{
"BriefDescription": "Underfill Read CAS Command in Regular Mode (64B) in Pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_UFILL_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc2",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.WR_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.WR_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc4",
"Unit": "MCHBM"
},
{
"BriefDescription": "IMC Clockticks at DCLK frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_MCHBM_CLOCKTICKS",
"PerPkg": "1",
@@ -1745,8 +2147,10 @@
},
{
"BriefDescription": "HBM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_MCHBM_HBM_PREALL.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that the precharge all command was sent.",
"UMask": "0x1",
@@ -1754,8 +2158,10 @@
},
{
"BriefDescription": "HBM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_MCHBM_HBM_PREALL.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that the precharge all command was sent.",
"UMask": "0x2",
@@ -1763,8 +2169,10 @@
},
{
"BriefDescription": "All Precharge Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_MCHBM_HBM_PRE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Precharge All Commands: Counts the number of times that the precharge all command was sent.",
"UMask": "0x3",
@@ -1772,15 +2180,19 @@
},
{
"BriefDescription": "IMC Clockticks at HCLK frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_MCHBM_HCLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "MCHBM"
},
{
"BriefDescription": "All precharge events",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0xff",
@@ -1788,8 +2200,10 @@
},
{
"BriefDescription": "Precharge from MC page table",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.PGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x88",
@@ -1797,8 +2211,10 @@
},
{
"BriefDescription": "HBM Precharge commands. : Precharges from Page Table",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.PGT_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Equivalent to PAGE_EMPTY",
"UMask": "0x8",
@@ -1806,8 +2222,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.PGT_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x80",
@@ -1815,8 +2233,10 @@
},
{
"BriefDescription": "Precharge due to read on page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x11",
@@ -1824,8 +2244,10 @@
},
{
"BriefDescription": "HBM Precharge commands. : Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.RD_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Precharge from read bank scheduler",
"UMask": "0x1",
@@ -1833,8 +2255,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.RD_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x10",
@@ -1842,8 +2266,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x44",
@@ -1851,8 +2277,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.UFILL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x4",
@@ -1860,8 +2288,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.UFILL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x40",
@@ -1869,8 +2299,10 @@
},
{
"BriefDescription": "Precharge due to write on page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x22",
@@ -1878,8 +2310,10 @@
},
{
"BriefDescription": "HBM Precharge commands. : Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.WR_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Precharge from write bank scheduler",
"UMask": "0x2",
@@ -1887,8 +2321,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.WR_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x20",
@@ -1896,46 +2332,58 @@
},
{
"BriefDescription": "Counts the number of cycles where the read buffer has greater than UMASK elements. NOTE: Umask must be set to the maximum number of elements in the queue (24 entries for SPR).",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_MCHBM_RDB_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Counts the number of inserts into the read buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_MCHBM_RDB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_MCHBM_RDB_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_MCHBM_RDB_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "MCHBM"
},
{
"BriefDescription": "Counts the number of elements in the read buffer per cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_MCHBM_RDB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_MCHBM_RPQ_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Allocations: Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
"UMask": "0x1",
@@ -1943,8 +2391,10 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_MCHBM_RPQ_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Allocations: Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
"UMask": "0x2",
@@ -1952,24 +2402,30 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_MCHBM_RPQ_OCCUPANCY_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Occupancy: Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_MCHBM_RPQ_OCCUPANCY_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Occupancy: Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_MCHBM_WPQ_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Allocations: Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
"UMask": "0x1",
@@ -1977,8 +2433,10 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_MCHBM_WPQ_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Allocations: Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
"UMask": "0x2",
@@ -1986,24 +2444,30 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_MCHBM_WPQ_OCCUPANCY_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Occupancy: Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to memory. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_MCHBM_WPQ_OCCUPANCY_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Occupancy: Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to memory. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_MCHBM_WPQ_READ_HIT",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -2012,8 +2476,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_MCHBM_WPQ_READ_HIT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x1",
@@ -2021,8 +2487,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_MCHBM_WPQ_READ_HIT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x2",
@@ -2030,8 +2498,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_MCHBM_WPQ_WRITE_HIT",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -2040,8 +2510,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_MCHBM_WPQ_WRITE_HIT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x1",
@@ -2049,8 +2521,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_MCHBM_WPQ_WRITE_HIT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x2",
@@ -2058,6 +2532,7 @@
},
{
"BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.ALL",
"PerPkg": "1",
@@ -2067,6 +2542,7 @@
},
{
"BriefDescription": "All DRAM CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -2076,8 +2552,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0 : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x40",
@@ -2085,8 +2563,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1 : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x80",
@@ -2094,6 +2574,7 @@
},
{
"BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -2103,8 +2584,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc2",
@@ -2112,8 +2595,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc8",
@@ -2121,8 +2606,10 @@
},
{
"BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/out auto-pre : DRAM RD_CAS and WR_CAS Commands : Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
"UMask": "0xc1",
@@ -2130,8 +2617,10 @@
},
{
"BriefDescription": "DRAM underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Underfill Read Issued : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc4",
@@ -2139,6 +2628,7 @@
},
{
"BriefDescription": "All DRAM write CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -2148,8 +2638,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xd0",
@@ -2157,8 +2649,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xe0",
@@ -2166,70 +2660,87 @@
},
{
"BriefDescription": "Pseudo Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Pseudo Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc1",
"Unit": "iMC"
},
{
"BriefDescription": "Underfill Read CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_UFILL_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "iMC"
},
{
"BriefDescription": "Underfill Read CAS Command in Regular Mode (64B) in Pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_UFILL_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc2",
"Unit": "iMC"
},
{
"BriefDescription": "Write CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.WR_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "iMC"
},
{
"BriefDescription": "Write CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.WR_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc4",
"Unit": "iMC"
},
{
"BriefDescription": "IMC Clockticks at DCLK frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
@@ -2239,8 +2750,10 @@
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M_DRAM_PRE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge All Commands : Counts the number of times that the precharge all command was sent.",
"UMask": "0x3",
@@ -2248,6 +2761,7 @@
},
{
"BriefDescription": "IMC Clockticks at HCLK frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_HCLOCKTICKS",
"PerPkg": "1",
@@ -2256,30 +2770,37 @@
},
{
"BriefDescription": "UNC_M_PCLS.RD",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_PCLS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.TOTAL",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_PCLS.TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.WR",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_PCLS.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Read Pending Queue inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M_PMM_RPQ_INSERTS",
"PerPkg": "1",
@@ -2288,6 +2809,7 @@
},
{
"BriefDescription": "PMM Read Pending Queue occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH0",
"PerPkg": "1",
@@ -2297,6 +2819,7 @@
},
{
"BriefDescription": "PMM Read Pending Queue occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH1",
"PerPkg": "1",
@@ -2306,8 +2829,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x10",
@@ -2315,8 +2840,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x20",
@@ -2324,8 +2851,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x4",
@@ -2333,8 +2862,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x8",
@@ -2342,13 +2873,16 @@
},
{
"BriefDescription": "PMM (for IXP) Write Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Write Pending Queue inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xe7",
"EventName": "UNC_M_PMM_WPQ_INSERTS",
"PerPkg": "1",
@@ -2357,6 +2891,7 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -2366,6 +2901,7 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH0",
"PerPkg": "1",
@@ -2375,6 +2911,7 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH1",
"PerPkg": "1",
@@ -2384,8 +2921,10 @@
},
{
"BriefDescription": "PMM (for IXP) Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM (for IXP) Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the IXP DIMM.",
"UMask": "0xc",
@@ -2393,8 +2932,10 @@
},
{
"BriefDescription": "PMM (for IXP) Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM (for IXP) Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the IXP DIMM.",
"UMask": "0x30",
@@ -2402,16 +2943,20 @@
},
{
"BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Channel PPD Cycles : Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x1",
@@ -2419,8 +2964,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x2",
@@ -2428,8 +2975,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x4",
@@ -2437,8 +2986,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x8",
@@ -2446,8 +2997,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
"UMask": "0x1",
@@ -2455,8 +3008,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
@@ -2464,14 +3019,17 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Clock-Enabled Self-Refresh : Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
"Unit": "iMC"
},
{
"BriefDescription": "Precharge due to read, write, underfill, or PGT.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.ALL",
"PerPkg": "1",
@@ -2481,6 +3039,7 @@
},
{
"BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.PGT",
"PerPkg": "1",
@@ -2490,8 +3049,10 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharges from Page Table",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.PGT_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Precharges from Page Table : Counts the number of DRAM Precharge commands sent on this channel. : Equivalent to PAGE_EMPTY",
"UMask": "0x8",
@@ -2499,8 +3060,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.PGT_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x80",
@@ -2508,6 +3071,7 @@
},
{
"BriefDescription": "Precharge due to read on page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -2517,8 +3081,10 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.RD_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Precharge due to read : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from read bank scheduler",
"UMask": "0x1",
@@ -2526,8 +3092,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.RD_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x10",
@@ -2535,8 +3103,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x44",
@@ -2544,8 +3114,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.UFILL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x4",
@@ -2553,8 +3125,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.UFILL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x40",
@@ -2562,6 +3136,7 @@
},
{
"BriefDescription": "Precharge due to write on page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
@@ -2571,8 +3146,10 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.WR_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Precharge due to write : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from write bank scheduler",
"UMask": "0x2",
@@ -2580,8 +3157,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.WR_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x20",
@@ -2589,21 +3168,26 @@
},
{
"BriefDescription": "Counts the number of cycles where the read buffer has greater than UMASK elements. This includes reads to both DDR and PMEM. NOTE: Umask must be set to the maximum number of elements in the queue (24 entries for SPR).",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M_RDB_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Counts the number of inserts into the read buffer destined for DDR. Does not count reads destined for PMEM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS.PCH0",
"PerPkg": "1",
@@ -2612,6 +3196,7 @@
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS.PCH1",
"PerPkg": "1",
@@ -2620,45 +3205,56 @@
},
{
"BriefDescription": "Counts the number of cycles where there's at least one element in the read buffer. This includes reads to both DDR and PMEM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NE.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NE.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Counts the number of cycles where there's at least one element in the read buffer. This includes reads to both DDR and PMEM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NOT_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "Counts the number of elements in the read buffer, including reads to both DDR and PMEM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M_RDB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH0",
"PerPkg": "1",
@@ -2668,6 +3264,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH1",
"PerPkg": "1",
@@ -2677,6 +3274,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
"PerPkg": "1",
@@ -2685,6 +3283,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
"PerPkg": "1",
@@ -2693,294 +3292,368 @@
},
{
"BriefDescription": "Scoreboard accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Write Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Write Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : FM read completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : FM write completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Read Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Read Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : NM read completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : NM write completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Alloc",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.ALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Dealloc",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FM_RD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FM_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": ": Valid",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.NM_RD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.NM_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.VLD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_M_SB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Cycles Not-Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M_SB_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Block region reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Block region writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Persistent Mem reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Persistent Mem writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Block region reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Block region writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Persistent Mem reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Persistent Mem writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : All",
+ "Counter": "0,1,2,3",
"EventCode": "0xda",
"EventName": "UNC_M_SB_PREF_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : DDR4",
+ "Counter": "0,1,2,3",
"EventCode": "0xda",
"EventName": "UNC_M_SB_PREF_INSERTS.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0xda",
"EventName": "UNC_M_SB_PREF_INSERTS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : All",
+ "Counter": "0,1,2,3",
"EventCode": "0xdb",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : DDR4",
+ "Counter": "0,1,2,3",
"EventCode": "0xdb",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : Persistent Mem",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -2989,230 +3662,287 @@
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.CANARY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.DDR_EARLY_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : FM requests rejected due to full address conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : NM requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : Patrol requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.NEW",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.OCC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "2LM Tag check hit in near memory cache (DDR4)",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.HIT",
"PerPkg": "1",
@@ -3221,6 +3951,7 @@
},
{
"BriefDescription": "2LM Tag check miss, no data at this line",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.MISS_CLEAN",
"PerPkg": "1",
@@ -3229,6 +3960,7 @@
},
{
"BriefDescription": "2LM Tag check miss, existing data may be evicted to PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.MISS_DIRTY",
"PerPkg": "1",
@@ -3237,6 +3969,7 @@
},
{
"BriefDescription": "2LM Tag check hit due to memory read",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.NM_RD_HIT",
"PerPkg": "1",
@@ -3245,6 +3978,7 @@
},
{
"BriefDescription": "2LM Tag check hit due to memory write",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.NM_WR_HIT",
"PerPkg": "1",
@@ -3253,6 +3987,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS.PCH0",
"PerPkg": "1",
@@ -3262,6 +3997,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS.PCH1",
"PerPkg": "1",
@@ -3271,6 +4007,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
"PerPkg": "1",
@@ -3279,6 +4016,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
"PerPkg": "1",
@@ -3287,8 +4025,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3297,8 +4037,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json
index 8948e85074f0..9482ddaea4d1 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "PCU PCLK Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
@@ -9,187 +10,235 @@
},
{
"BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 0 Cycles : Cycles spent in phase-shedding power state 0",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 1 Cycles : Cycles spent in phase-shedding power state 1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 2 Cycles : Cycles spent in phase-shedding power state 2",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 3 Cycles : Cycles spent in phase-shedding power state 3",
"Unit": "PCU"
},
{
"BriefDescription": "AVX256 Frequency Clipping",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "AVX512 Frequency Clipping",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.",
"Unit": "PCU"
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IO P Limit Strongest Lower Limit Cycles : Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Memory Phase Shedding Cycles : Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C0 : Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0",
"EventCode": "0x06",
"EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C0 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C6 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x0a",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x09",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total Core C State Transition Cycles : Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
"Unit": "PCU"
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json
index a1e3b8d2ebe7..609a9549cbf3 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
index ee47a09172a1..1a121ef47a99 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Requests rejected by the L2Q",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "CORE_REJECT_L2Q.ALL",
"PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "L1 Cache evictions for dirty data",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "DL1.DIRTY_EVICTION",
"PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Requests rejected by the XQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "L2_REJECT_XQ.ALL",
"PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "L2 cache request misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
@@ -47,6 +53,7 @@
},
{
"BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
@@ -77,6 +86,7 @@
},
{
"BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
@@ -97,6 +108,7 @@
},
{
"BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
@@ -117,6 +130,7 @@
},
{
"BriefDescription": "Memory uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL",
@@ -127,6 +141,7 @@
},
{
"BriefDescription": "Load uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -137,6 +152,7 @@
},
{
"BriefDescription": "Store uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -147,6 +163,7 @@
},
{
"BriefDescription": "Locked load uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
@@ -157,6 +174,7 @@
},
{
"BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
@@ -167,6 +185,7 @@
},
{
"BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -177,6 +196,7 @@
},
{
"BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
@@ -187,6 +207,7 @@
},
{
"BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100007",
@@ -194,6 +215,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -204,6 +226,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -214,6 +237,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -224,6 +248,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -234,6 +259,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -244,6 +270,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -254,6 +281,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -264,6 +292,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -274,6 +303,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -284,6 +314,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -294,6 +325,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -304,6 +336,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -314,6 +347,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -324,6 +358,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -334,6 +369,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -344,6 +380,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -354,6 +391,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -364,6 +402,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -374,6 +413,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -384,6 +424,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -394,6 +435,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -404,6 +446,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -414,6 +457,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -424,6 +468,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -434,6 +479,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -444,6 +490,7 @@
},
{
"BriefDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -454,6 +501,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
"MSRIndex": "0x1a6",
@@ -464,6 +512,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
"MSRIndex": "0x1a6",
@@ -474,6 +523,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6",
@@ -484,6 +534,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6",
@@ -494,6 +545,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6",
@@ -504,6 +556,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -514,6 +567,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -524,6 +578,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -534,6 +589,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -544,6 +600,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -554,6 +611,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -564,6 +622,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -574,6 +633,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -584,6 +644,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -594,6 +655,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -604,6 +666,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -614,6 +677,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -624,6 +688,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -634,6 +699,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -644,6 +710,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -654,6 +721,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -664,6 +732,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -674,6 +743,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -684,6 +754,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -694,6 +765,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -704,6 +776,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -714,6 +787,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -724,6 +798,7 @@
},
{
"BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -734,6 +809,7 @@
},
{
"BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -744,6 +820,7 @@
},
{
"BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -754,6 +831,7 @@
},
{
"BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -764,6 +842,7 @@
},
{
"BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -774,6 +853,7 @@
},
{
"BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -784,6 +864,7 @@
},
{
"BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -794,6 +875,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -804,6 +886,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -814,6 +897,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -824,6 +908,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -834,6 +919,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -844,6 +930,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -854,6 +941,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -864,6 +952,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -874,6 +963,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -884,6 +974,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -894,6 +985,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -904,6 +996,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -914,6 +1007,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -924,6 +1018,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -934,6 +1029,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -944,6 +1040,7 @@
},
{
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -954,6 +1051,7 @@
},
{
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -964,6 +1062,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -974,6 +1073,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -984,6 +1084,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -994,6 +1095,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1004,6 +1106,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/counter.json b/tools/perf/pmu-events/arch/x86/goldmont/counter.json
new file mode 100644
index 000000000000..aa443347b694
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmont/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json b/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json
index a3f03855ca05..0141e214ff39 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles the FP divide unit is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0xCD",
"EventName": "CYCLES_DIV_BUSY.FPDIV",
"PublicDescription": "Counts core cycles the floating point divide unit is busy.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Machine clears due to FP assists",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Floating point divide uops retired. (Precise Event Capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.FPDIV",
"PEBS": "2",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/frontend.json b/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
index ace2a114b546..249a97cf3f4c 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "BACLEARs asserted for any branch type",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ALL",
"PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "BACLEARs asserted for conditional branch",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.COND",
"PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "BACLEARs asserted for return branch",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.RETURN",
"PublicDescription": "Counts BACLEARS on return instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Decode restrictions due to predicting wrong instruction length",
+ "Counter": "0,1,2,3",
"EventCode": "0xE9",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
"PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "MS decode starts",
+ "Counter": "0,1,2,3",
"EventCode": "0xE7",
"EventName": "MS_DECODED.MS_ENTRY",
"PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
index b97642a109ee..abd0fc02afac 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Machine clears due to memory ordering issue",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved as another core is in the process of modifying the data.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Load uops that split a page (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
"PEBS": "2",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Store uops that split a page (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"PEBS": "2",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/other.json b/tools/perf/pmu-events/arch/x86/goldmont/other.json
index c4fd0acb15bc..260b10740644 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles code-fetch stalled due to any reason.",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ALL",
"PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Cycles hardware interrupts are masked",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.MASKED",
"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Cycles pending interrupts are masked",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Hardware interrupts received",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.RECEIVED",
"PublicDescription": "Counts hardware interrupts received by the processor.",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index acb897483a87..013d2d5b50df 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Retired branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "2",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Retired taken branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"PEBS": "2",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Retired near call instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "2",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Retired far branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "2",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "Retired near indirect call instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "2",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "2",
@@ -54,6 +60,7 @@
},
{
"BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "2",
@@ -63,6 +70,7 @@
},
{
"BriefDescription": "Retired near relative call instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "2",
@@ -72,6 +80,7 @@
},
{
"BriefDescription": "Retired near return instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "2",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "2",
@@ -90,6 +100,7 @@
},
{
"BriefDescription": "Retired mispredicted branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "2",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "2",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "2",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "2",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Retired mispredicted near return instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "2",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "2",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Core cycles when core is not halted (Fixed event)",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
"SampleAfterValue": "2000003",
@@ -150,6 +167,7 @@
},
{
"BriefDescription": "Core cycles when core is not halted",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
@@ -157,6 +175,7 @@
},
{
"BriefDescription": "Reference cycles when core is not halted",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF",
"PublicDescription": "Reference cycles when core is not halted. This event uses a programmable general purpose performance counter.",
@@ -165,6 +184,7 @@
},
{
"BriefDescription": "Reference cycles when core is not halted (Fixed event)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
"SampleAfterValue": "2000003",
@@ -172,6 +192,7 @@
},
{
"BriefDescription": "Cycles a divider is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0xCD",
"EventName": "CYCLES_DIV_BUSY.ALL",
"PublicDescription": "Counts core cycles if either divide unit is busy.",
@@ -179,6 +200,7 @@
},
{
"BriefDescription": "Cycles the integer divide unit is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0xCD",
"EventName": "CYCLES_DIV_BUSY.IDIV",
"PublicDescription": "Counts core cycles the integer divide unit is busy.",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Instructions retired (Fixed event)",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
"SampleAfterValue": "2000003",
@@ -194,6 +217,7 @@
},
{
"BriefDescription": "Instructions retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "2",
@@ -202,6 +226,7 @@
},
{
"BriefDescription": "Unfilled issue slots per cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
@@ -209,6 +234,7 @@
},
{
"BriefDescription": "Unfilled issue slots per cycle to recover",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
@@ -217,6 +243,7 @@
},
{
"BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
@@ -225,6 +252,7 @@
},
{
"BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.4K_ALIAS",
"PEBS": "2",
@@ -234,6 +262,7 @@
},
{
"BriefDescription": "Loads blocked (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL_BLOCK",
"PEBS": "2",
@@ -243,6 +272,7 @@
},
{
"BriefDescription": "Loads blocked due to store data not ready (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "2",
@@ -252,6 +282,7 @@
},
{
"BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "2",
@@ -261,6 +292,7 @@
},
{
"BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.UTLB_MISS",
"PEBS": "2",
@@ -270,6 +302,7 @@
},
{
"BriefDescription": "All machine clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.ALL",
"PublicDescription": "Counts machine clears for any reason.",
@@ -277,6 +310,7 @@
},
{
"BriefDescription": "Machine clears due to memory disambiguation",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
@@ -285,6 +319,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel(R) architecture processors.",
@@ -293,6 +328,7 @@
},
{
"BriefDescription": "Uops issued to the back end per cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
@@ -300,6 +336,7 @@
},
{
"BriefDescription": "Uops requested but not-delivered to the back-end per cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UOPS_NOT_DELIVERED.ANY",
"PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
@@ -307,6 +344,7 @@
},
{
"BriefDescription": "Uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
"PEBS": "2",
@@ -315,6 +353,7 @@
},
{
"BriefDescription": "Integer divide uops retired. (Precise Event Capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.IDIV",
"PEBS": "2",
@@ -324,6 +363,7 @@
},
{
"BriefDescription": "MS uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MS",
"PEBS": "2",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
index 8c4929a517fa..13b23bbe4226 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ITLB misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "ITLB.MISS",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
@@ -29,6 +32,7 @@
},
{
"BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
@@ -39,6 +43,7 @@
},
{
"BriefDescription": "Duration of page-walks in cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.CYCLES",
"PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
@@ -47,6 +52,7 @@
},
{
"BriefDescription": "Duration of D-side page-walks in cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.D_SIDE_CYCLES",
"PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Duration of I-side pagewalks in cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.I_SIDE_CYCLES",
"PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
index a7f80fd1b1df..92086758e7ce 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Requests rejected by the L2Q",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "CORE_REJECT_L2Q.ALL",
"PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "L1 Cache evictions for dirty data",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "DL1.REPLACEMENT",
"PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Requests rejected by the XQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "L2_REJECT_XQ.ALL",
"PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "L2 cache request misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "L2 cache requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
@@ -47,6 +53,7 @@
},
{
"BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
@@ -77,6 +86,7 @@
},
{
"BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
@@ -97,6 +108,7 @@
},
{
"BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
@@ -117,6 +130,7 @@
},
{
"BriefDescription": "Memory uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL",
@@ -127,6 +141,7 @@
},
{
"BriefDescription": "Load uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -137,6 +152,7 @@
},
{
"BriefDescription": "Store uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -147,6 +163,7 @@
},
{
"BriefDescription": "Locked load uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
@@ -157,6 +174,7 @@
},
{
"BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
@@ -167,6 +185,7 @@
},
{
"BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -177,6 +196,7 @@
},
{
"BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
@@ -187,6 +207,7 @@
},
{
"BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100007",
@@ -194,6 +215,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -204,6 +226,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -214,6 +237,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -224,6 +248,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -234,6 +259,7 @@
},
{
"BriefDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -244,6 +270,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -254,6 +281,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -264,6 +292,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -274,6 +303,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -284,6 +314,7 @@
},
{
"BriefDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -294,6 +325,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -304,6 +336,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -314,6 +347,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -324,6 +358,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -334,6 +369,7 @@
},
{
"BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -344,6 +380,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -354,6 +391,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -364,6 +402,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -374,6 +413,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -384,6 +424,7 @@
},
{
"BriefDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -394,6 +435,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -404,6 +446,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -414,6 +457,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -424,6 +468,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -434,6 +479,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -444,6 +490,7 @@
},
{
"BriefDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -454,6 +501,7 @@
},
{
"BriefDescription": "Counts bus lock and split lock requests hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -464,6 +512,7 @@
},
{
"BriefDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -474,6 +523,7 @@
},
{
"BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -484,6 +534,7 @@
},
{
"BriefDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -494,6 +545,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -504,6 +556,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -514,6 +567,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -524,6 +578,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -534,6 +589,7 @@
},
{
"BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -544,6 +600,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -554,6 +611,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -564,6 +622,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -574,6 +633,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -584,6 +644,7 @@
},
{
"BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -594,6 +655,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -604,6 +666,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -614,6 +677,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -624,6 +688,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -634,6 +699,7 @@
},
{
"BriefDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -644,6 +710,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -654,6 +721,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -664,6 +732,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -674,6 +743,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -684,6 +754,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -694,6 +765,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -704,6 +776,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -714,6 +787,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -724,6 +798,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -734,6 +809,7 @@
},
{
"BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -744,6 +820,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -754,6 +831,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -764,6 +842,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -774,6 +853,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -784,6 +864,7 @@
},
{
"BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -794,6 +875,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -804,6 +886,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -814,6 +897,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -824,6 +908,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -834,6 +919,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -844,6 +930,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -854,6 +941,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -864,6 +952,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -874,6 +963,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -884,6 +974,7 @@
},
{
"BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -894,6 +985,7 @@
},
{
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -904,6 +996,7 @@
},
{
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -914,6 +1007,7 @@
},
{
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -924,6 +1018,7 @@
},
{
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -934,6 +1029,7 @@
},
{
"BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -944,6 +1040,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -954,6 +1051,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
@@ -964,6 +1062,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
@@ -974,6 +1073,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
@@ -984,6 +1084,7 @@
},
{
"BriefDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
"MSRIndex": "0x1a6",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/counter.json b/tools/perf/pmu-events/arch/x86/goldmontplus/counter.json
new file mode 100644
index 000000000000..aa443347b694
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ }
+] \ No newline at end of file
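
The new counter.json above describes the Goldmont Plus core PMU layout: three fixed counters and four general-purpose counters. The per-event "Counter" fields added throughout this patch ("0,1,2,3" for general-purpose events, "Fixed counter N" for the fixed events) are expected to stay within that layout. A minimal sketch of such a cross-check follows, assuming the file paths and JSON shape shown in this patch; it is illustrative only and not part of the kernel tree.

    #!/usr/bin/env python3
    # Hypothetical consistency check, not part of tools/perf. It assumes the
    # pmu-events layout from this patch: counter.json describing the core PMU
    # and per-event "Counter" fields such as "0,1,2,3" or "Fixed counter 1".
    import json

    ARCH_DIR = "tools/perf/pmu-events/arch/x86/goldmontplus"  # path as in this patch

    with open(f"{ARCH_DIR}/counter.json") as f:
        core = next(u for u in json.load(f) if u["Unit"] == "core")
    num_generic = int(core["CountersNumGeneric"])  # 4 on Goldmont Plus

    with open(f"{ARCH_DIR}/cache.json") as f:
        events = json.load(f)

    for ev in events:
        counters = ev.get("Counter", "")
        if not counters or counters.startswith("Fixed"):
            continue  # fixed-counter events are constrained separately
        used = [int(c) for c in counters.split(",") if c.strip().isdigit()]
        out_of_range = [c for c in used if c >= num_generic]
        if out_of_range:
            print(f'{ev["EventName"]}: counter(s) {out_of_range} exceed the '
                  f'{num_generic} general-purpose counters')
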
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json b/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json
index 822a7a6bcaeb..3d06ac1ee0cf 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles the FP divide unit is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0xCD",
"EventName": "CYCLES_DIV_BUSY.FPDIV",
"PublicDescription": "Counts core cycles the floating point divide unit is busy.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Machine clears due to FP assists",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Floating point divide uops retired (Precise Event Capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.FPDIV",
"PEBS": "2",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
index ace2a114b546..249a97cf3f4c 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "BACLEARs asserted for any branch type",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ALL",
"PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "BACLEARs asserted for conditional branch",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.COND",
"PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "BACLEARs asserted for return branch",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.RETURN",
"PublicDescription": "Counts BACLEARS on return instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Decode restrictions due to predicting wrong instruction length",
+ "Counter": "0,1,2,3",
"EventCode": "0xE9",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
"PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "MS decode starts",
+ "Counter": "0,1,2,3",
"EventCode": "0xE7",
"EventName": "MS_DECODED.MS_ENTRY",
"PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
index 7038873a5c8d..72bc2155ed00 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Machine clears due to memory ordering issue",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Load uops that split a page (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
"PEBS": "2",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Store uops that split a page (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"PEBS": "2",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
index ec0ce9078c98..96bbc4fc82a1 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles code-fetch stalled due to any reason.",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ALL",
"PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding.",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Cycles hardware interrupts are masked",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.MASKED",
"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Cycles pending interrupts are masked",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Hardware interrupts received",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.RECEIVED",
"PublicDescription": "Counts hardware interrupts received by the processor.",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
index 33ef331e77e0..8cbf253d0c30 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Retired branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "2",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Retired taken branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"PEBS": "2",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Retired near call instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "2",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Retired far branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "2",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "Retired near indirect call instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "2",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "2",
@@ -54,6 +60,7 @@
},
{
"BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "2",
@@ -63,6 +70,7 @@
},
{
"BriefDescription": "Retired near relative call instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "2",
@@ -72,6 +80,7 @@
},
{
"BriefDescription": "Retired near return instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "2",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "2",
@@ -90,6 +100,7 @@
},
{
"BriefDescription": "Retired mispredicted branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "2",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "2",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "2",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "2",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Retired mispredicted near return instructions (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "2",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "2",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Core cycles when core is not halted (Fixed event)",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
"SampleAfterValue": "2000003",
@@ -150,6 +167,7 @@
},
{
"BriefDescription": "Core cycles when core is not halted",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
@@ -157,6 +175,7 @@
},
{
"BriefDescription": "Reference cycles when core is not halted",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF",
"PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
@@ -165,6 +184,7 @@
},
{
"BriefDescription": "Reference cycles when core is not halted (Fixed event)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
"SampleAfterValue": "2000003",
@@ -172,6 +192,7 @@
},
{
"BriefDescription": "Cycles a divider is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0xCD",
"EventName": "CYCLES_DIV_BUSY.ALL",
"PublicDescription": "Counts core cycles if either divide unit is busy.",
@@ -179,6 +200,7 @@
},
{
"BriefDescription": "Cycles the integer divide unit is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0xCD",
"EventName": "CYCLES_DIV_BUSY.IDIV",
"PublicDescription": "Counts core cycles the integer divide unit is busy.",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Instructions retired (Fixed event)",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "2",
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
@@ -195,6 +218,7 @@
},
{
"BriefDescription": "Instructions retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "2",
@@ -203,6 +227,7 @@
},
{
"BriefDescription": "Instructions retired - using Reduced Skid PEBS feature",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "2",
@@ -211,6 +236,7 @@
},
{
"BriefDescription": "Unfilled issue slots per cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
@@ -218,6 +244,7 @@
},
{
"BriefDescription": "Unfilled issue slots per cycle to recover",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
@@ -226,6 +253,7 @@
},
{
"BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
"PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
@@ -234,6 +262,7 @@
},
{
"BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.4K_ALIAS",
"PEBS": "2",
@@ -243,6 +272,7 @@
},
{
"BriefDescription": "Loads blocked (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL_BLOCK",
"PEBS": "2",
@@ -252,6 +282,7 @@
},
{
"BriefDescription": "Loads blocked due to store data not ready (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "2",
@@ -261,6 +292,7 @@
},
{
"BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "2",
@@ -270,6 +302,7 @@
},
{
"BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.UTLB_MISS",
"PEBS": "2",
@@ -279,6 +312,7 @@
},
{
"BriefDescription": "All machine clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.ALL",
"PublicDescription": "Counts machine clears for any reason.",
@@ -286,6 +320,7 @@
},
{
"BriefDescription": "Machine clears due to memory disambiguation",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Machines clear due to a page fault",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"PublicDescription": "Counts the number of times that the machines clears due to a page fault. Covers both I-side and D-side(Loads/Stores) page faults. A page fault occurs when either page is not present, or an access violation",
@@ -302,6 +338,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel(R) architecture processors.",
@@ -310,6 +347,7 @@
},
{
"BriefDescription": "Uops issued to the back end per cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
@@ -317,6 +355,7 @@
},
{
"BriefDescription": "Uops requested but not-delivered to the back-end per cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UOPS_NOT_DELIVERED.ANY",
"PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
@@ -324,6 +363,7 @@
},
{
"BriefDescription": "Uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
"PEBS": "2",
@@ -332,6 +372,7 @@
},
{
"BriefDescription": "Integer divide uops retired (Precise Event Capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.IDIV",
"PEBS": "2",
@@ -341,6 +382,7 @@
},
{
"BriefDescription": "MS uops retired (Precise event capable)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MS",
"PEBS": "2",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
index 3d6feb45a50b..09208af2e0ba 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Page walk completed due to a demand load to a 1GB page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand load to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Page walks outstanding due to a demand load every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 1GB page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Page walks outstanding due to a demand data store every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Page walks outstanding due to walking the EPT every cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by number of walks.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "ITLB misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "ITLB.MISS",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1GB",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Page walk completed due to an instruction fetch in a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Page walks outstanding due to an instruction fetch every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
@@ -133,6 +149,7 @@
},
{
"BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
@@ -143,6 +160,7 @@
},
{
"BriefDescription": "STLB flushes",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSHES.STLB_ANY",
"PublicDescription": "Counts STLB flushes. The TLBs are flushed on instructions like INVLPG and MOV to CR3.",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/cache.json b/tools/perf/pmu-events/arch/x86/grandridge/cache.json
index f937ba0e50e1..04802e254e51 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/cache.json
@@ -1,22 +1,25 @@
[
{
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
{
"BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x4f"
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an instruction cache or TLB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.ALL",
"SampleAfterValue": "1000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.L2_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which hit in the LLC.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT",
"SampleAfterValue": "1000003",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS",
"SampleAfterValue": "1000003",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.ALL",
"SampleAfterValue": "1000003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.L2_HIT",
"PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 cache.",
@@ -61,6 +69,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT",
"SampleAfterValue": "1000003",
@@ -68,6 +77,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS",
"SampleAfterValue": "1000003",
@@ -75,62 +85,63 @@
},
{
"BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of load ops retired that miss in the L1 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x40"
},
{
"BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of load ops retired that miss in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
{
"BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x1c"
},
{
"BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x20"
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ALL",
"SampleAfterValue": "20003",
@@ -138,6 +149,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
"SampleAfterValue": "20003",
@@ -145,6 +157,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.RSV",
"SampleAfterValue": "20003",
@@ -152,6 +165,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
"SampleAfterValue": "20003",
@@ -159,179 +173,210 @@
},
{
"BriefDescription": "Counts the number of load ops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x81"
},
{
"BriefDescription": "Counts the number of store ops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x82"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
"MSRIndex": "0x3F6",
"MSRValue": "0x400",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
"MSRIndex": "0x3F6",
"MSRValue": "0x800",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of load uops retired that performed one or more locks",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x21"
},
{
"BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x43"
},
{
"BriefDescription": "Counts the number of retired split load uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
{
"BriefDescription": "Counts the number of retired split store uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x42"
},
{
"BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x6"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/counter.json b/tools/perf/pmu-events/arch/x86/grandridge/counter.json
new file mode 100644
index 000000000000..9fd5d8ad6d3b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/counter.json
@@ -0,0 +1,42 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "B2CMI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+    "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHACMS",
+ "CountersNumFixed": "0",
+    "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
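The new counter.json declares how many fixed and general-purpose counters each PMU unit exposes; the "Counter": "0,1,..." fields added throughout this series are expected to stay within the core unit's generic budget. A minimal sketch of that relationship (not part of the patch; the paths are assumptions for a checked-out kernel tree):

import json

# Assumed paths into a kernel checkout -- adjust as needed.
COUNTER_JSON = "tools/perf/pmu-events/arch/x86/grandridge/counter.json"
CACHE_JSON = "tools/perf/pmu-events/arch/x86/grandridge/cache.json"

with open(COUNTER_JSON) as f:
    units = {u["Unit"]: u for u in json.load(f)}
core_generic = int(units["core"]["CountersNumGeneric"])   # 8 on this part

with open(CACHE_JSON) as f:
    events = json.load(f)

# Every counter index listed on an event must fit in the declared budget.
for ev in events:
    idx = [int(c) for c in ev.get("Counter", "").split(",") if c]
    assert all(i < core_generic for i in idx), ev["EventName"]
print(f"{len(events)} events fit within {core_generic} generic counters")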
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/floating-point.json b/tools/perf/pmu-events/arch/x86/grandridge/floating-point.json
index 00c9a8ae0f53..5266eed969be 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xcd",
"EventName": "ARITH.FPDIV_ACTIVE",
@@ -9,48 +10,89 @@
},
{
"BriefDescription": "Counts the number of all types of floating point operations per uop with all default weighting",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.ALL",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x3"
},
{
"BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP64]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.DP",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of floating point operations that produce 32 bit single precision results [This event is alias to FP_FLOPS_RETIRED.SP]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.FP32",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of floating point operations that produce 64 bit double precision results [This event is alias to FP_FLOPS_RETIRED.DP]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.FP64",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP32]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.SP",
- "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+    "BriefDescription": "Counts the number of retired instructions whose sources are a packed 128 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.128B_DP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 128 bit single precision floating point. This may be SSE or AVX.128 operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.128B_SP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 256 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.256B_DP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 32bit single precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.32B_SP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 64 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.64B_DP",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
@@ -59,9 +101,9 @@
},
{
"BriefDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x8"
}
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/frontend.json b/tools/perf/pmu-events/arch/x86/grandridge/frontend.json
index 356d36aecc81..7cdf611efb23 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -9,14 +10,15 @@
},
{
"BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x10"
},
{
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"SampleAfterValue": "200003",
@@ -24,6 +26,7 @@
},
{
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"SampleAfterValue": "200003",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json b/tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json
new file mode 100644
index 000000000000..07e542297e93
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json
@@ -0,0 +1,849 @@
+[
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per module",
+ "MetricExpr": "cstate_module@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Module_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ / 1e9",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Percentage of time spent in the active CPU power state C0",
+ "MetricExpr": "tma_info_system_cpu_utilization",
+ "MetricName": "cpu_utilization",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_2mb_large_page_load_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the Data Translation Lookaside Buffer (DTLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_load_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_store_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricName": "itlb_2nd_level_large_page_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "itlb_2nd_level_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": "ICACHE.MISSES / INST_RETIRED.ANY",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L1_HIT / INST_RETIRED.ANY",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L2_HIT / INST_RETIRED.ANY",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "LONGEST_LAT_CACHE.REFERENCE / INST_RETIRED.ANY",
+ "MetricName": "l2_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_CRD + UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF) / INST_RETIRED.ANY",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF + UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA) / INST_RETIRED.ANY",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) in nano seconds",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT) / (UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT) * #num_packages)) * duration_time",
+ "MetricName": "llc_demand_data_read_miss_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Load operations retired per instruction",
+ "MetricExpr": "MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricName": "loads_retired_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "(UNC_M_CAS_COUNT_SCH0.RD + UNC_M_CAS_COUNT_SCH1.RD) * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(UNC_M_CAS_COUNT_SCH0.RD + UNC_M_CAS_COUNT_SCH1.RD + UNC_M_CAS_COUNT_SCH0.WR + UNC_M_CAS_COUNT_SCH1.WR) * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "(UNC_M_CAS_COUNT_SCH0.WR + UNC_M_CAS_COUNT_SCH1.WR) * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "Store operations retired per instruction",
+ "MetricExpr": "MEM_UOPS_RETIRED.ALL_STORES / INST_RETIRED.ANY",
+ "MetricName": "stores_retired_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions",
+ "MetricExpr": "tma_core_bound",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_allocation_restriction",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_branch_detect",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_branch_resteer",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "MetricExpr": "TOPDOWN_FE_BOUND.CISC / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_decode",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_ifetch_bandwidth",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_ifetch_latency",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation",
+ "MetricExpr": "INST_RETIRED.ANY / FP_FLOPS_RETIRED.ALL",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipflop"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_INST_RETIRED.128B_DP + FP_INST_RETIRED.128B_SP)",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_avx128"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction",
+ "MetricExpr": "INST_RETIRED.ANY / FP_INST_RETIRED.64B_DP",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_scalar_dp"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction",
+ "MetricExpr": "INST_RETIRED.ANY / FP_INST_RETIRED.32B_SP",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_scalar_sp"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
+ "MetricExpr": "tma_info_bottleneck_dtlb_miss_bound_cycles",
+ "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
+ "MetricExpr": "tma_info_bottleneck_ifetch_miss_bound_cycles",
+ "MetricGroup": "Ifetch",
+ "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
+ "MetricExpr": "tma_info_bottleneck_load_miss_bound_cycles",
+ "MetricGroup": "Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
+ "MetricExpr": "tma_info_bottleneck_mem_exec_bound_cycles",
+ "MetricGroup": "Mem_Exec",
+ "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
+ "MetricExpr": "100 * (LD_HEAD.DTLB_MISS_AT_RET + LD_HEAD.PGWALK_AT_RET) / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Cycles",
+ "MetricName": "tma_info_bottleneck_dtlb_miss_bound_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.ALL / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Cycles;Ifetch",
+ "MetricName": "tma_info_bottleneck_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.ALL / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Cycles;Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
+ "MetricExpr": "100 * LD_HEAD.ANY_AT_RET / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Cycles;Mem_Exec",
+ "MetricName": "tma_info_bottleneck_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_inst_mix_ipbranch"
+ },
+ {
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricName": "tma_info_br_inst_mix_ipcall"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricName": "tma_info_br_inst_mix_ipfarbranch"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
+ "MetricExpr": "INST_RETIRED.ANY / (BR_MISP_RETIRED.COND - BR_MISP_RETIRED.COND_TAKEN)",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_ntaken"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_taken"
+ },
+ {
+ "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_indirect"
+ },
+ {
+ "BriefDescription": "Instructions per retired return Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_ret"
+ },
+ {
+ "BriefDescription": "Instructions per retired Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_inst_mix_ipmispredict"
+ },
+ {
+ "BriefDescription": "Ratio of all branches which mispredict",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_ratio"
+ },
+ {
+ "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_to_unknown_branch_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
+ "MetricExpr": "tma_info_buffer_stalls_load_buffer_stall_cycles",
+ "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
+ "MetricExpr": "tma_info_buffer_stalls_mem_rsv_stall_cycles",
+ "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
+ "MetricExpr": "tma_info_buffer_stalls_store_buffer_stall_cycles",
+ "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.LD_BUF / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_load_buffer_stall_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.RSV / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_mem_rsv_stall_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.ST_BUF / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_store_buffer_stall_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE / INST_RETIRED.ANY",
+ "MetricName": "tma_info_core_cpi"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "FP_FLOPS_RETIRED.ALL / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_core_flopc"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_core_ipc"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "TOPDOWN_RETIRING.ALL_P / INST_RETIRED.ANY",
+ "MetricName": "tma_info_core_upi"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
+ "MetricExpr": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l2hit",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
+ "MetricExpr": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l3hit",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_MISS / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.L2_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l2hit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l3hit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
+ "MetricExpr": "tma_info_load_miss_bound_loadmissbound_with_l2hit",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
+ "MetricExpr": "tma_info_load_miss_bound_loadmissbound_with_l3hit",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_MISS / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.L2_HIT / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l2hit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_HIT / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l3hit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a pipeline block",
+ "MetricExpr": "100 * LD_HEAD.L1_BOUND_AT_RET / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_l1_bound"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement",
+ "MetricExpr": "100 * (LD_HEAD.L1_BOUND_AT_RET + MEM_BOUND_STALLS_LOAD.ALL) / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_load_bound"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full",
+ "MetricExpr": "100 * (MEM_SCHEDULER_BLOCK.ST_BUF / MEM_SCHEDULER_BLOCK.ALL) * tma_mem_scheduler",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_store_bound"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to floating point assists",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.FP_ASSIST / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_fp_assist_pki"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to page faults",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.PAGE_FAULT / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_page_fault_pki"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to self-modifying code",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.SMC / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_smc_pki"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
+ "MetricExpr": "tma_info_mem_exec_blocks_loads_with_adressaliasing",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "tma_info_mem_exec_blocks_loads_with_storefwdblk",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
+ "MetricExpr": "100 * LD_BLOCKS.ADDRESS_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_exec_blocks_loads_with_adressaliasing",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_exec_blocks_loads_with_storefwdblk",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_l1miss",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_otherpipelineblks",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_pagewalk",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_stlbhit",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_storefwding",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
+ "MetricExpr": "100 * LD_HEAD.L1_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_l1miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
+ "MetricExpr": "100 * LD_HEAD.OTHER_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_otherpipelineblks",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
+ "MetricExpr": "100 * LD_HEAD.PGWALK_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_pagewalk",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
+ "MetricExpr": "100 * LD_HEAD.DTLB_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_stlbhit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
+ "MetricExpr": "100 * LD_HEAD.ST_ADDR_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_storefwding",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Instructions per Load",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_ipload"
+ },
+ {
+ "BriefDescription": "Instructions per Store",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricName": "tma_info_mem_mix_ipstore"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that perform one or more locks",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_load_locks_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that are splits",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.SPLIT_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_load_splits_ratio"
+ },
+ {
+ "BriefDescription": "Ratio of mem load uops to all uops",
+ "MetricExpr": "1e3 * MEM_UOPS_RETIRED.ALL_LOADS / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_mem_mix_memload_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
+ "MetricExpr": "tma_info_serialization_tpause_cycles",
+    "MetricName": "tma_info_serialization_%_tpause_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
+ "MetricExpr": "100 * SERIALIZATION.C01_MS_SCB / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricName": "tma_info_serialization_tpause_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricName": "tma_info_system_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "FP_FLOPS_RETIRED.ALL / (duration_time * 1e9)",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_system_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "cpu@CPU_CLK_UNHALTED.CORE_P@k / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_kernel_utilization"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are FPDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.FPDIV / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_uop_mix_fpdiv_uop_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are IDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.IDIV / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_uop_mix_idiv_uop_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are microcode ops",
+ "MetricExpr": "100 * UOPS_RETIRED.MS / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_uop_mix_microcode_uop_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are x87 uops",
+ "MetricExpr": "100 * UOPS_RETIRED.X87 / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_uop_mix_x87_uop_ratio"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ITLB_MISS / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops",
+ "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops",
+ "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation",
+ "MetricExpr": "tma_backend_bound - tma_core_bound",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that result in retirement slots",
+ "MetricExpr": "TOPDOWN_RETIRING.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
+ "MetricgroupNoGroup": "TopdownL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore operating frequency in GHz",
+ "MetricExpr": "UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_CLOCKTICKS) * #num_packages) / 1e9 / duration_time",
+ "MetricName": "uncore_frequency",
+ "ScaleUnit": "1GHz"
+ }
+]
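
Note: the MetricExpr strings added above are arithmetic formulas over event counts which perf evaluates internally when a metric is requested (for example with perf stat -M). As a rough illustration only, using made-up counter values rather than perf's own parser, the expressions for tma_retiring and tma_info_system_cpu_utilization combine as follows:

    # Illustrative sketch only: evaluates two MetricExpr formulas from the table
    # above with hypothetical counter values; perf parses these JSON files itself.
    counts = {
        "TOPDOWN_RETIRING.ALL_P": 1.2e9,    # made-up retired issue slots
        "CPU_CLK_UNHALTED.CORE": 1.0e9,     # made-up unhalted core cycles
        "CPU_CLK_UNHALTED.REF_TSC": 8.0e8,  # made-up unhalted reference cycles
        "TSC": 1.0e9,                       # made-up elapsed TSC cycles
    }

    # tma_retiring: TOPDOWN_RETIRING.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)
    # (six issue slots per core cycle, hence the divisor in the expression).
    tma_retiring = counts["TOPDOWN_RETIRING.ALL_P"] / (6 * counts["CPU_CLK_UNHALTED.CORE"])

    # tma_info_system_cpu_utilization: CPU_CLK_UNHALTED.REF_TSC / TSC
    cpu_utilization = counts["CPU_CLK_UNHALTED.REF_TSC"] / counts["TSC"]

    print(f"tma_retiring    = {tma_retiring:.1%}")   # ScaleUnit 100% -> report as %
    print(f"cpu_utilization = {cpu_utilization:.1%}")
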
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/memory.json b/tools/perf/pmu-events/arch/x86/grandridge/memory.json
index e0ce2decc805..22d23077618e 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.ANY_AT_RET",
"SampleAfterValue": "1000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_BOUND_AT_RET",
"SampleAfterValue": "1000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_MISS_AT_RET",
"SampleAfterValue": "1000003",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.OTHER_AT_RET",
"PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.PGWALK_AT_RET",
"SampleAfterValue": "1000003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.ST_ADDR_AT_RET",
"SampleAfterValue": "1000003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "20003",
@@ -51,22 +58,23 @@
},
{
"BriefDescription": "Counts misaligned loads that are 4K page splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts misaligned stores that are 4K page splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -76,6 +84,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/metricgroups.json b/tools/perf/pmu-events/arch/x86/grandridge/metricgroups.json
new file mode 100644
index 000000000000..40984c23a6c9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/metricgroups.json
@@ -0,0 +1,23 @@
+{
+ "Flops": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Ifetch": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Load_Store_Miss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Mem_Exec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Summary": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TopdownL1": "Metrics for top-down breakdown at level 1",
+ "TopdownL2": "Metrics for top-down breakdown at level 2",
+ "TopdownL3": "Metrics for top-down breakdown at level 3",
+ "load_store_bound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "tma_L1_group": "Metrics for top-down breakdown at level 1",
+ "tma_L2_group": "Metrics for top-down breakdown at level 2",
+ "tma_L3_group": "Metrics for top-down breakdown at level 3",
+ "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category",
+ "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
+ "tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category",
+ "tma_ifetch_bandwidth_group": "Metrics contributing to tma_ifetch_bandwidth category",
+ "tma_ifetch_latency_group": "Metrics contributing to tma_ifetch_latency category",
+ "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
+ "tma_resource_bound_group": "Metrics contributing to tma_resource_bound category"
+}
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/other.json b/tools/perf/pmu-events/arch/x86/grandridge/other.json
index 70a9da7e97df..28f9a4c3ea84 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/other.json
@@ -1,15 +1,16 @@
[
{
"BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xe4",
"EventName": "LBR_INSERTS.ANY",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -19,6 +20,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -27,7 +29,18 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x75",
"EventName": "SERIALIZATION.C01_MS_SCB",
"SampleAfterValue": "200003",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
index 90292dc03d33..b67c0c89054d 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles when any of the dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_ACTIVE",
@@ -9,153 +10,157 @@
},
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PEBS": "1",
"PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
"SampleAfterValue": "200003"
},
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x7e"
},
{
"BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfe"
},
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xbf"
},
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xeb"
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf9"
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf7"
},
{
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PEBS": "1",
"PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
"SampleAfterValue": "200003"
},
{
"BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x7e"
},
{
"BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfe"
},
{
"BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xeb"
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
{
"BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf7"
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"UMask": "0x3"
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
@@ -164,18 +169,21 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"SampleAfterValue": "2000003",
@@ -183,37 +191,38 @@
},
{
"BriefDescription": "Counts the number of instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
- "PEBS": "1",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"SampleAfterValue": "20003",
@@ -221,6 +230,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"SampleAfterValue": "20003",
@@ -228,6 +238,7 @@
},
{
"BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -235,6 +246,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20003",
@@ -242,14 +254,15 @@
},
{
"BriefDescription": "Counts the number of Last Branch Record (LBR) entries. Requires LBRs to be enabled and configured in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
@@ -257,6 +270,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL_P",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]",
@@ -264,6 +278,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Fast Nukes such as Memory Ordering Machine clears and MRN nukes",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
"SampleAfterValue": "1000003",
@@ -271,6 +286,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
"SampleAfterValue": "1000003",
@@ -278,6 +294,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
"SampleAfterValue": "1000003",
@@ -285,6 +302,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
"SampleAfterValue": "1000003",
@@ -292,12 +310,14 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to due to certain allocation restrictions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
"SampleAfterValue": "1000003",
@@ -305,12 +325,14 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL_P",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). This could be caused by RSV full or load/store buffer block.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -318,6 +340,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC and FPC RAT stalls - which can be due to the FIQ and IEC reservation station stall (integer, FP and SIMD scheduler not being able to accept another uop. )",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -325,6 +348,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to mrbl stall. A 'marble' refers to a physical register file entry, also known as the physical destination (PDST).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
"SampleAfterValue": "1000003",
@@ -332,6 +356,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to ROB full",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
"SampleAfterValue": "1000003",
@@ -339,6 +364,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
"SampleAfterValue": "1000003",
@@ -346,18 +372,21 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL_P",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
"SampleAfterValue": "1000003",
@@ -365,6 +394,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTClear",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
"SampleAfterValue": "1000003",
@@ -372,6 +402,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ms",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
"SampleAfterValue": "1000003",
@@ -379,6 +410,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stall",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
"SampleAfterValue": "1000003",
@@ -386,6 +418,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
"SampleAfterValue": "1000003",
@@ -393,6 +426,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
"SampleAfterValue": "1000003",
@@ -400,6 +434,7 @@
},
{
"BriefDescription": "This event is deprecated. [This event is alias to TOPDOWN_FE_BOUND.ITLB_MISS]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
@@ -408,6 +443,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to itlb miss [This event is alias to TOPDOWN_FE_BOUND.ITLB]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB_MISS",
"SampleAfterValue": "1000003",
@@ -415,6 +451,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend that do not categorize into any other common frontend stall",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
"SampleAfterValue": "1000003",
@@ -422,27 +459,29 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
{
- "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL_P]",
+ "BriefDescription": "Counts the number of consumed retirement slots. [This event is alias to TOPDOWN_RETIRING.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x72",
"EventName": "TOPDOWN_RETIRING.ALL",
- "PEBS": "1",
"SampleAfterValue": "1000003"
},
{
- "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL]",
+ "BriefDescription": "Counts the number of consumed retirement slots. [This event is alias to TOPDOWN_RETIRING.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x72",
"EventName": "TOPDOWN_RETIRING.ALL_P",
- "PEBS": "1",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
@@ -450,32 +489,32 @@
},
{
"BriefDescription": "Counts the total number of uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
- "PEBS": "1",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Counts the number of integer divide uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
"BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of x87 uops retired, includes those in ms flows",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x2"
}
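
Note: the EventCode/UMask pairs in the entries above identify core PMU events. On Intel core PMUs the raw perf event config conventionally packs the event code in bits 0-7 and the umask in bits 8-15; the sketch below is an illustration of that packing (not perf's own name resolver, which uses these JSON files directly), taking BR_INST_RETIRED.COND from the table above as the example:

    # Minimal sketch: pack EventCode/UMask into a raw config value using the
    # conventional x86 core PMU layout (event code in bits 0-7, umask in bits 8-15).
    # This mirrors what "perf stat -e cpu/event=0xc4,umask=0x7e/" would program.
    def raw_config(event_code: int, umask: int) -> int:
        return (umask << 8) | event_code

    print(hex(raw_config(0xC4, 0x7E)))  # BR_INST_RETIRED.COND -> 0x7ec4
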
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json
index 36614429dd72..1eaf796601b1 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Clockticks for CMS units attached to CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_CHACMS_CLOCKTICKS",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of CHA clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_CHA_CLOCKTICKS",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR or IRQ (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_ANY",
"PerPkg": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in IRQ (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_IRQ",
"PerPkg": "1",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_TOR",
"PerPkg": "1",
@@ -42,40 +47,50 @@
},
{
"BriefDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups: CRd Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x1bd0ff",
@@ -83,8 +98,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests and Read Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1bc1ff",
@@ -92,8 +109,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Reads",
"UMask": "0x1fc1ff",
@@ -101,8 +120,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches",
"UMask": "0x841ff",
@@ -110,8 +131,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops which miss the Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Misses",
"UMask": "0x1fc101",
@@ -119,8 +142,10 @@
},
{
"BriefDescription": "Cache Lookups: All Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally",
"UMask": "0xbdfff",
@@ -128,8 +153,10 @@
},
{
"BriefDescription": "Cache Lookups: Code Read Requests and Code Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x19d0ff",
@@ -137,8 +164,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests and Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x19c1ff",
@@ -146,8 +175,10 @@
},
{
"BriefDescription": "Cache Lookups: Code Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x1850ff",
@@ -155,8 +186,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1841ff",
@@ -164,8 +197,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests",
"UMask": "0x1848ff",
@@ -173,8 +208,10 @@
},
{
"BriefDescription": "Cache Lookups: LLC Prefetch Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x189dff",
@@ -182,8 +219,10 @@
},
{
"BriefDescription": "Cache Lookups: All Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x199dff",
@@ -191,8 +230,10 @@
},
{
"BriefDescription": "Cache Lookups: Code Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x1910ff",
@@ -200,8 +241,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1981ff",
@@ -209,8 +252,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests",
"UMask": "0x1908ff",
@@ -218,8 +263,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests",
"UMask": "0x19c8ff",
@@ -227,8 +274,10 @@
},
{
"BriefDescription": "Cache Lookups: All RFO and RFO Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All RFOs - Demand and Prefetches",
"UMask": "0x1bc8ff",
@@ -236,8 +285,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches",
"UMask": "0x9c8ff",
@@ -245,8 +296,10 @@
},
{
"BriefDescription": "Cache Lookups: Writes to Locally Homed Memory (includes writebacks from L1/L2)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Writes",
"UMask": "0x842ff",
@@ -254,8 +307,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : All Lines Victimized",
"UMask": "0xf",
@@ -263,24 +318,30 @@
},
{
"BriefDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - All Lines",
"UMask": "0x200f",
@@ -288,8 +349,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in E State",
"UMask": "0x2002",
@@ -297,8 +360,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in F State",
"UMask": "0x2008",
@@ -306,8 +371,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in M State",
"UMask": "0x2001",
@@ -315,8 +382,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in S State",
"UMask": "0x2004",
@@ -324,8 +393,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in E state",
"UMask": "0x2",
@@ -333,8 +404,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in M state",
"UMask": "0x1",
@@ -342,8 +415,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in S State",
"UMask": "0x4",
@@ -351,6 +426,7 @@
},
{
"BriefDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -360,30 +436,37 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
"PerPkg": "1",
@@ -392,6 +475,7 @@
},
{
"BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE",
"PerPkg": "1",
@@ -401,6 +485,7 @@
},
{
"BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -409,6 +494,7 @@
},
{
"BriefDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS",
"PerPkg": "1",
@@ -418,6 +504,7 @@
},
{
"BriefDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -426,6 +513,7 @@
},
{
"BriefDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES",
"PerPkg": "1",
@@ -435,6 +523,7 @@
},
{
"BriefDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -443,8 +532,10 @@
},
{
"BriefDescription": "All TOR Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All",
"UMask": "0xc001ffff",
@@ -452,8 +543,10 @@
},
{
"BriefDescription": "All locally initiated requests from IA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from iA Cores",
"UMask": "0xc001ff01",
@@ -461,6 +554,7 @@
},
{
"BriefDescription": "CLFlush events that are initiated from the Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
"PerPkg": "1",
@@ -470,6 +564,7 @@
},
{
"BriefDescription": "CLFlushOpt events that are initiated from the Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
"PerPkg": "1",
@@ -479,6 +574,7 @@
},
{
"BriefDescription": "Code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
"PerPkg": "1",
@@ -488,6 +584,7 @@
},
{
"BriefDescription": "Code read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
"PerPkg": "1",
@@ -497,6 +594,7 @@
},
{
"BriefDescription": "Data read opt from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
"PerPkg": "1",
@@ -506,6 +604,7 @@
},
{
"BriefDescription": "Data read opt prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
"PerPkg": "1",
@@ -515,8 +614,10 @@
},
{
"BriefDescription": "All locally initiated requests from IA Cores which hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
"UMask": "0xc001fd01",
@@ -524,6 +625,7 @@
},
{
"BriefDescription": "Code read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
"PerPkg": "1",
@@ -533,6 +635,7 @@
},
{
"BriefDescription": "Code read prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -542,6 +645,7 @@
},
{
"BriefDescription": "Data read opt from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
"PerPkg": "1",
@@ -551,6 +655,7 @@
},
{
"BriefDescription": "Data read opt prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
"PerPkg": "1",
@@ -560,6 +665,7 @@
},
{
"BriefDescription": "ItoM requests from local IA cores that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
"PerPkg": "1",
@@ -569,6 +675,7 @@
},
{
"BriefDescription": "Last level cache prefetch code read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
"PerPkg": "1",
@@ -578,6 +685,7 @@
},
{
"BriefDescription": "Last level cache prefetch data read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
"PerPkg": "1",
@@ -587,6 +695,7 @@
},
{
"BriefDescription": "Last level cache prefetch read for ownership from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -596,6 +705,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
"PerPkg": "1",
@@ -605,6 +715,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -614,6 +725,7 @@
},
{
"BriefDescription": "ItoM events that are initiated from the Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
"PerPkg": "1",
@@ -623,6 +735,7 @@
},
{
"BriefDescription": "ItoMCacheNear requests from local IA cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
"PerPkg": "1",
@@ -632,6 +745,7 @@
},
{
"BriefDescription": "Last level cache prefetch code read from local IA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
"PerPkg": "1",
@@ -641,6 +755,7 @@
},
{
"BriefDescription": "Last level cache prefetch data read from local IA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -650,6 +765,7 @@
},
{
"BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -659,6 +775,7 @@
},
{
"BriefDescription": "All locally initiated requests from IA Cores which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
"PerPkg": "1",
@@ -668,6 +785,7 @@
},
{
"BriefDescription": "Code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
"PerPkg": "1",
@@ -677,6 +795,7 @@
},
{
"BriefDescription": "CRDs from local IA cores to locally homed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
"PerPkg": "1",
@@ -686,6 +805,7 @@
},
{
"BriefDescription": "Code read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
"PerPkg": "1",
@@ -695,6 +815,7 @@
},
{
"BriefDescription": "CRD Prefetches from local IA cores to locally homed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
"PerPkg": "1",
@@ -704,6 +825,7 @@
},
{
"BriefDescription": "Data read opt from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
"PerPkg": "1",
@@ -713,6 +835,7 @@
},
{
"BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd_Opt, and which target local memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_LOCAL",
"PerPkg": "1",
@@ -722,6 +845,7 @@
},
{
"BriefDescription": "Data read opt prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
"PerPkg": "1",
@@ -731,6 +855,7 @@
},
{
"BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF_OPT, and target local memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_LOCAL",
"PerPkg": "1",
@@ -740,6 +865,7 @@
},
{
"BriefDescription": "ItoM requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
"PerPkg": "1",
@@ -749,6 +875,7 @@
},
{
"BriefDescription": "Last level cache prefetch code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
"PerPkg": "1",
@@ -758,6 +885,7 @@
},
{
"BriefDescription": "Last level cache prefetch data read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -767,6 +895,7 @@
},
{
"BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -776,6 +905,7 @@
},
{
"BriefDescription": "WCILF requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
"PerPkg": "1",
@@ -785,6 +915,7 @@
},
{
"BriefDescription": "WCIL requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
"PerPkg": "1",
@@ -794,6 +925,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
"PerPkg": "1",
@@ -803,6 +935,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -812,6 +945,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -821,6 +955,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -830,6 +965,7 @@
},
{
"BriefDescription": "UCRDF requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
"PerPkg": "1",
@@ -839,6 +975,7 @@
},
{
"BriefDescription": "WCIL requests from a local IA core that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
"PerPkg": "1",
@@ -848,6 +985,7 @@
},
{
"BriefDescription": "WCILF requests from local IA core that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
"PerPkg": "1",
@@ -857,6 +995,7 @@
},
{
"BriefDescription": "WCILF requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
"PerPkg": "1",
@@ -866,6 +1005,7 @@
},
{
"BriefDescription": "WCIL requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
"PerPkg": "1",
@@ -875,6 +1015,7 @@
},
{
"BriefDescription": "WIL requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
"PerPkg": "1",
@@ -884,6 +1025,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
"PerPkg": "1",
@@ -893,6 +1035,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
"PerPkg": "1",
@@ -902,6 +1045,7 @@
},
{
"BriefDescription": "SpecItoM events that are initiated from the Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
"PerPkg": "1",
@@ -911,6 +1055,7 @@
},
{
"BriefDescription": "WbEFtoEs issued by iA Cores. (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
"PerPkg": "1",
@@ -920,6 +1065,7 @@
},
{
"BriefDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
"PerPkg": "1",
@@ -929,6 +1075,7 @@
},
{
"BriefDescription": "WbMtoEs issued by iA Cores . (Modified Write Backs)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
"PerPkg": "1",
@@ -938,6 +1085,7 @@
},
{
"BriefDescription": "WbMtoI requests from local IA cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
"PerPkg": "1",
@@ -947,6 +1095,7 @@
},
{
"BriefDescription": "WbStoIs issued by iA Cores . (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
"PerPkg": "1",
@@ -956,6 +1105,7 @@
},
{
"BriefDescription": "WCIL requests from a local IA core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
"PerPkg": "1",
@@ -965,6 +1115,7 @@
},
{
"BriefDescription": "WCILF requests from local IA core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
"PerPkg": "1",
@@ -974,8 +1125,10 @@
},
{
"BriefDescription": "All TOR inserts from local IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from IO Devices",
"UMask": "0xc001ff04",
@@ -983,6 +1136,7 @@
},
{
"BriefDescription": "CLFlush requests from IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
"PerPkg": "1",
@@ -992,8 +1146,10 @@
},
{
"BriefDescription": "All TOR inserts from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
"UMask": "0xc001fd04",
@@ -1001,6 +1157,7 @@
},
{
"BriefDescription": "ItoMs from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
"PerPkg": "1",
@@ -1010,6 +1167,7 @@
},
{
"BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1019,6 +1177,7 @@
},
{
"BriefDescription": "PCIRDCURs issued by IO devices which hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -1028,6 +1187,7 @@
},
{
"BriefDescription": "RFOs from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
"PerPkg": "1",
@@ -1037,6 +1197,7 @@
},
{
"BriefDescription": "All TOR ItoM inserts from local IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
"PerPkg": "1",
@@ -1046,6 +1207,7 @@
},
{
"BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1055,8 +1217,10 @@
},
{
"BriefDescription": "All TOR inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
"UMask": "0xc001fe04",
@@ -1064,6 +1228,7 @@
},
{
"BriefDescription": "All TOR ItoM inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
"PerPkg": "1",
@@ -1073,6 +1238,7 @@
},
{
"BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1082,6 +1248,7 @@
},
{
"BriefDescription": "PCIRDCURs issued by IO devices which miss the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -1091,6 +1258,7 @@
},
{
"BriefDescription": "All TOR RFO inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
"PerPkg": "1",
@@ -1100,6 +1268,7 @@
},
{
"BriefDescription": "PCIRDCURs issued by IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
"PerPkg": "1",
@@ -1109,6 +1278,7 @@
},
{
"BriefDescription": "RFOs from local IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
"PerPkg": "1",
@@ -1118,6 +1288,7 @@
},
{
"BriefDescription": "WBMtoI requests from IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
"PerPkg": "1",
@@ -1127,6 +1298,7 @@
},
{
"BriefDescription": "TOR Inserts for SF or LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LLC_OR_SF_EVICTIONS",
"PerPkg": "1",
@@ -1136,8 +1308,10 @@
},
{
"BriefDescription": "All locally initiated requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA and IO",
"UMask": "0xc000ff05",
@@ -1145,8 +1319,10 @@
},
{
"BriefDescription": "All from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA",
"UMask": "0xc000ff01",
@@ -1154,8 +1330,10 @@
},
{
"BriefDescription": "All from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local IO",
"UMask": "0xc000ff04",
@@ -1163,8 +1341,10 @@
},
{
"BriefDescription": "Occupancy for all TOR entries",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All",
"UMask": "0xc001ffff",
@@ -1172,8 +1352,10 @@
},
{
"BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from iA Cores",
"UMask": "0xc001ff01",
@@ -1181,6 +1363,7 @@
},
{
"BriefDescription": "TOR Occupancy for CLFlush events that are initiated from the Core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
"PerPkg": "1",
@@ -1190,6 +1373,7 @@
},
{
"BriefDescription": "TOR Occupancy for CLFlushOpt events that are initiated from the Core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
"PerPkg": "1",
@@ -1199,6 +1383,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
"PerPkg": "1",
@@ -1208,6 +1393,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
"PerPkg": "1",
@@ -1217,8 +1403,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
"UMask": "0xc827ff01",
@@ -1226,8 +1414,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
"UMask": "0xc8a7ff01",
@@ -1235,8 +1425,10 @@
},
{
"BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
"UMask": "0xc001fd01",
@@ -1244,6 +1436,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
"PerPkg": "1",
@@ -1253,6 +1446,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read prefetch from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -1262,8 +1456,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
"UMask": "0xc827fd01",
@@ -1271,8 +1467,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
"UMask": "0xc8a7fd01",
@@ -1280,6 +1478,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
"PerPkg": "1",
@@ -1289,6 +1488,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
"PerPkg": "1",
@@ -1298,6 +1498,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
"PerPkg": "1",
@@ -1307,6 +1508,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -1316,6 +1518,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
"PerPkg": "1",
@@ -1325,6 +1528,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -1334,6 +1538,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoM events that are initiated from the Core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
"PerPkg": "1",
@@ -1343,6 +1548,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNear requests from local IA cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1352,6 +1558,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
"PerPkg": "1",
@@ -1361,6 +1568,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -1370,6 +1578,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -1379,8 +1588,10 @@
},
{
"BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
"UMask": "0xc001fe01",
@@ -1388,6 +1599,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
"PerPkg": "1",
@@ -1397,6 +1609,7 @@
},
{
"BriefDescription": "TOR Occupancy for CRDs from local IA cores to locally homed memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
"PerPkg": "1",
@@ -1406,6 +1619,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
"PerPkg": "1",
@@ -1415,6 +1629,7 @@
},
{
"BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to locally homed memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
"PerPkg": "1",
@@ -1424,8 +1639,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
"UMask": "0xc827fe01",
@@ -1433,8 +1650,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
"UMask": "0xc8a7fe01",
@@ -1442,6 +1661,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
"PerPkg": "1",
@@ -1451,6 +1671,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
"PerPkg": "1",
@@ -1460,6 +1681,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -1469,6 +1691,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -1478,6 +1701,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
"PerPkg": "1",
@@ -1487,6 +1711,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
"PerPkg": "1",
@@ -1496,6 +1721,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
"PerPkg": "1",
@@ -1505,6 +1731,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -1514,6 +1741,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -1523,6 +1751,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -1532,6 +1761,7 @@
},
{
"BriefDescription": "TOR Occupancy for UCRDF requests from local IA cores that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
"PerPkg": "1",
@@ -1541,6 +1771,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from a local IA core that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
"PerPkg": "1",
@@ -1550,6 +1781,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA core that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
"PerPkg": "1",
@@ -1559,6 +1791,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
"PerPkg": "1",
@@ -1568,6 +1801,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
"PerPkg": "1",
@@ -1577,6 +1811,7 @@
},
{
"BriefDescription": "TOR Occupancy for WIL requests from local IA cores that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
"PerPkg": "1",
@@ -1586,6 +1821,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
"PerPkg": "1",
@@ -1595,6 +1831,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
"PerPkg": "1",
@@ -1604,6 +1841,7 @@
},
{
"BriefDescription": "TOR Occupancy for SpecItoM events that are initiated from the Core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
"PerPkg": "1",
@@ -1613,6 +1851,7 @@
},
{
"BriefDescription": "TOR Occupancy for WbMtoI requests from local IA cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
"PerPkg": "1",
@@ -1622,6 +1861,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from a local IA core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
"PerPkg": "1",
@@ -1631,6 +1871,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
"PerPkg": "1",
@@ -1640,8 +1881,10 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from IO Devices",
"UMask": "0xc001ff04",
@@ -1649,6 +1892,7 @@
},
{
"BriefDescription": "TOR Occupancy for CLFlush requests from IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
"PerPkg": "1",
@@ -1658,8 +1902,10 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
"UMask": "0xc001fd04",
@@ -1667,6 +1913,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMs from local IO devices which hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
"PerPkg": "1",
@@ -1676,6 +1923,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1685,6 +1933,7 @@
},
{
"BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -1694,6 +1943,7 @@
},
{
"BriefDescription": "TOR Occupancy for RFOs from local IO devices which hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
"PerPkg": "1",
@@ -1703,6 +1953,7 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
"PerPkg": "1",
@@ -1712,6 +1963,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1721,8 +1973,10 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
"UMask": "0xc001fe04",
@@ -1730,6 +1984,7 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
"PerPkg": "1",
@@ -1739,6 +1994,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1748,6 +2004,7 @@
},
{
"BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which miss the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -1757,6 +2014,7 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR RFO inserts from local IO devices which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
"PerPkg": "1",
@@ -1766,6 +2024,7 @@
},
{
"BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
"PerPkg": "1",
@@ -1775,6 +2034,7 @@
},
{
"BriefDescription": "TOR Occupancy for RFOs from local IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
"PerPkg": "1",
@@ -1784,6 +2044,7 @@
},
{
"BriefDescription": "TOR Occupancy for WBMtoI requests from IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
"PerPkg": "1",
@@ -1793,8 +2054,10 @@
},
{
"BriefDescription": "TOR Occupancy for All locally initiated requests",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA and IO",
"UMask": "0xc000ff05",
@@ -1802,8 +2065,10 @@
},
{
"BriefDescription": "TOR Occupancy for All from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA",
"UMask": "0xc000ff01",
@@ -1811,8 +2076,10 @@
},
{
"BriefDescription": "TOR Occupancy for All from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local IO",
"UMask": "0xc000ff04",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json
index 9091f8fde51f..6aaca4039107 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Clockticks of the mesh to memory (B2CMI)",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_B2CMI_CLOCKTICKS",
"PerPkg": "1",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of times B2CMI egress did D2C (direct to core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_B2CMI_DIRECT2CORE_TAKEN",
"PerPkg": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of times D2C wasn't honoured even though the incoming request had d2c set for non cisgress txn",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_B2CMI_DIRECT2CORE_TXN_OVERRIDE",
"PerPkg": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts any read",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_B2CMI_IMC_READS.ALL",
"PerPkg": "1",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Counts normal reads issue to CMI",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_B2CMI_IMC_READS.NORMAL",
"PerPkg": "1",
@@ -40,14 +45,17 @@
},
{
"BriefDescription": "Counts reads to 1lm non persistent memory regions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "B2CMI"
},
{
"BriefDescription": "All Writes - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -56,6 +64,7 @@
},
{
"BriefDescription": "Full Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.FULL",
"PerPkg": "1",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -72,22 +82,27 @@
},
{
"BriefDescription": "DDR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x120",
"Unit": "B2CMI"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT -All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_B2CMI_PREFCAM_INSERTS.XPT_ALLCH",
"PerPkg": "1",
@@ -97,14 +112,17 @@
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_B2CMI_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_B2CMI_TRACKER_INSERTS.CH0",
"PerPkg": "1",
@@ -113,6 +131,7 @@
},
{
"BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_B2CMI_TRACKER_OCCUPANCY.CH0",
"PerPkg": "1",
@@ -121,22 +140,27 @@
},
{
"BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_B2CMI_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Total Write Cache Occupancy : Mem",
+ "Counter": "0,1,2,3",
"EventCode": "0x0F",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "IRP Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
@@ -144,6 +168,7 @@
},
{
"BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_I_FAF_INSERTS",
"PerPkg": "1",
@@ -151,21 +176,26 @@
},
{
"BriefDescription": "FAF occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_I_FAF_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.LOST_FWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json
index c301ef95ae8d..33fc7b835abf 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "IIO Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_IIO_CLOCKTICKS",
"PerPkg": "1",
@@ -9,8 +10,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -19,8 +22,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -29,8 +34,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -39,8 +46,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -49,8 +58,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -59,8 +70,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -69,8 +82,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -79,8 +94,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -89,8 +106,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -99,8 +118,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -109,8 +130,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -119,8 +142,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -129,8 +154,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -139,8 +166,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -149,8 +178,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -159,8 +190,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -169,8 +202,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -179,8 +214,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -189,6 +226,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS",
"FCMask": "0x07",
@@ -199,8 +237,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -209,8 +249,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -219,8 +261,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -229,8 +273,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -239,8 +285,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -249,8 +297,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -259,8 +309,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -269,8 +321,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -279,6 +333,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
"FCMask": "0x07",
@@ -289,6 +344,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -299,6 +355,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -309,6 +366,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -319,6 +377,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -329,6 +388,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -339,6 +399,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -349,6 +410,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -359,6 +421,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -369,6 +432,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -379,6 +443,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -389,6 +454,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -399,6 +465,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -409,6 +476,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -419,6 +487,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -429,6 +498,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -439,6 +509,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -449,6 +520,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -459,6 +531,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -469,6 +542,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -479,6 +553,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -489,6 +564,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -499,6 +575,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -509,6 +586,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -519,6 +597,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -529,8 +608,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -539,8 +620,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -549,8 +632,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -559,8 +644,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -569,8 +656,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -579,8 +668,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -589,8 +680,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -599,8 +692,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -609,8 +704,10 @@
},
{
"BriefDescription": "IOTLB Hits to a 1G Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10",
@@ -618,8 +715,10 @@
},
{
"BriefDescription": "IOTLB Hits to a 2M Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x8",
@@ -627,8 +726,10 @@
},
{
"BriefDescription": "IOTLB Hits to a 4K Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x4",
@@ -636,8 +737,10 @@
},
{
"BriefDescription": "Context cache hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x80",
@@ -645,8 +748,10 @@
},
{
"BriefDescription": "Context cache lookups",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x40",
@@ -654,8 +759,10 @@
},
{
"BriefDescription": "IOTLB lookups first",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x1",
@@ -663,8 +770,10 @@
},
{
"BriefDescription": "IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.MISSES",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20",
@@ -672,8 +781,10 @@
},
{
"BriefDescription": "IOMMU memory access (both low and high priority)",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0xc0",
@@ -681,8 +792,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache Hit to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x4",
@@ -690,8 +803,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache Hit to a 256T page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10",
@@ -699,8 +814,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache Hit to a 512G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x8",
@@ -708,8 +825,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -718,8 +837,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -728,8 +849,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -738,8 +861,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -748,8 +873,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -758,8 +885,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -768,8 +897,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -778,14 +909,17 @@
},
{
"BriefDescription": "All 9 bits of Page Walk Tracker Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"Unit": "IIO"
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -796,6 +930,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -806,6 +941,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -816,6 +952,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -826,6 +963,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -836,6 +974,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -846,6 +985,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -856,6 +996,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -866,6 +1007,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -876,6 +1018,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -886,6 +1029,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -896,6 +1040,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -906,6 +1051,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -916,6 +1062,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -926,6 +1073,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -936,6 +1084,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -946,6 +1095,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -956,6 +1106,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -966,6 +1117,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -976,6 +1128,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -986,6 +1139,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -996,6 +1150,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -1006,6 +1161,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -1016,6 +1172,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -1026,6 +1183,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -1036,6 +1194,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -1046,6 +1205,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -1056,6 +1216,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -1066,6 +1227,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -1076,6 +1238,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -1086,6 +1249,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -1096,6 +1260,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -1106,8 +1271,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -1116,8 +1283,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -1126,8 +1295,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -1136,8 +1307,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -1146,8 +1319,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -1156,8 +1331,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -1166,8 +1343,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -1176,8 +1355,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json
index a2405ed640c9..7e6e6764f181 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DRAM Activate Count : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.ALL",
"PerPkg": "1",
@@ -9,30 +10,37 @@
},
{
"BriefDescription": "DRAM Activate Count : Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf1",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Activate Count : Underfill Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf4",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Activate Count : Write transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf2",
"Unit": "IMC"
},
{
"BriefDescription": "CAS count for SubChannel 0, all CAS operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.ALL",
"PerPkg": "1",
@@ -41,6 +49,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 0, all reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.RD",
"PerPkg": "1",
@@ -49,6 +58,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 0 regular reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.RD_REG",
"PerPkg": "1",
@@ -57,6 +67,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 0 underfill reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL",
"PerPkg": "1",
@@ -65,6 +76,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 0, all writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.WR",
"PerPkg": "1",
@@ -73,22 +85,27 @@
},
{
"BriefDescription": "CAS count for SubChannel 0 regular writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "IMC"
},
{
"BriefDescription": "CAS count for SubChannel 0 auto-precharge writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "IMC"
},
{
"BriefDescription": "CAS count for SubChannel 1, all CAS operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.ALL",
"PerPkg": "1",
@@ -97,6 +114,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 1, all reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.RD",
"PerPkg": "1",
@@ -105,6 +123,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 1 regular reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.RD_REG",
"PerPkg": "1",
@@ -113,6 +132,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 1 underfill reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL",
"PerPkg": "1",
@@ -121,6 +141,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 1, all writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.WR",
"PerPkg": "1",
@@ -129,22 +150,27 @@
},
{
"BriefDescription": "CAS count for SubChannel 1 regular writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "IMC"
},
{
"BriefDescription": "CAS count for SubChannel 1 auto-precharge writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "IMC"
},
{
"BriefDescription": "Number of DRAM DCLK clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
@@ -154,14 +180,17 @@
},
{
"BriefDescription": "Number of DRAM HCLK clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_HCLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Clockticks",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.ALL",
"PerPkg": "1",
@@ -170,6 +199,7 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to (?) : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.PGT",
"PerPkg": "1",
@@ -178,46 +208,57 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf1",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf4",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf2",
"Unit": "IMC"
},
{
"BriefDescription": "Read buffer inserts on subchannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS.SCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IMC"
},
{
"BriefDescription": "Read buffer inserts on subchannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS.SCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IMC"
},
{
"BriefDescription": "Read buffer occupancy on subchannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M_RDB_OCCUPANCY_SCH0",
"PerPkg": "1",
@@ -225,6 +266,7 @@
},
{
"BriefDescription": "Read buffer occupancy on subchannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M_RDB_OCCUPANCY_SCH1",
"PerPkg": "1",
@@ -232,22 +274,27 @@
},
{
"BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "IMC"
},
{
"BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "IMC"
},
{
"BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH0",
"PerPkg": "1",
@@ -256,6 +303,7 @@
},
{
"BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH1",
"PerPkg": "1",
@@ -264,6 +312,7 @@
},
{
"BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH0",
"PerPkg": "1",
@@ -272,6 +321,7 @@
},
{
"BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH1",
"PerPkg": "1",
@@ -280,6 +330,7 @@
},
{
"BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH0",
"PerPkg": "1",
@@ -287,6 +338,7 @@
},
{
"BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH1",
"PerPkg": "1",
@@ -294,6 +346,7 @@
},
{
"BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH0",
"PerPkg": "1",
@@ -301,6 +354,7 @@
},
{
"BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH1",
"PerPkg": "1",
@@ -308,22 +362,27 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "IMC"
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "IMC"
},
{
"BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH0",
"PerPkg": "1",
@@ -332,6 +391,7 @@
},
{
"BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH1",
"PerPkg": "1",
@@ -340,6 +400,7 @@
},
{
"BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH0",
"PerPkg": "1",
@@ -348,6 +409,7 @@
},
{
"BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH1",
"PerPkg": "1",
@@ -356,6 +418,7 @@
},
{
"BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH0",
"PerPkg": "1",
@@ -363,6 +426,7 @@
},
{
"BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH1",
"PerPkg": "1",
@@ -370,6 +434,7 @@
},
{
"BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH0",
"PerPkg": "1",
@@ -377,6 +442,7 @@
},
{
"BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH1",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-power.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-power.json
index e3a66166e28c..02e59f64a544 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "PCU Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json
index 371974c6d6c3..35cc5b6d41f2 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "200003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all pages sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "2000003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -61,6 +69,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -69,6 +78,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
"SampleAfterValue": "1000003",
@@ -84,6 +95,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -91,6 +103,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -99,6 +112,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -107,6 +121,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -115,6 +130,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding for iside in PMH every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for iside in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals. Walks could be counted by edge detecting on this event, but would count restarted suspended walks.",
@@ -123,6 +139,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.DTLB_MISS_AT_RET",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/cache.json b/tools/perf/pmu-events/arch/x86/graniterapids/cache.json
index 56212827870c..b56066274813 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/cache.json
@@ -1,6 +1,135 @@
[
{
+ "BriefDescription": "L1D.HWPF_MISS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "L1D.HWPF_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALLS",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "PublicDescription": "Counts all requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xdf"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -9,6 +138,7 @@
},
{
"BriefDescription": "Demand Data Read access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -16,7 +146,159 @@
"UMask": "0xe1"
},
{
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Counts demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe7"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_HWPF",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf0"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_REQUEST.HIT]",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HIT",
+ "PublicDescription": "Counts all requests that hit L2 cache. [This event is alias to L2_REQUEST.HIT]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xdf"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HWPF_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_REQUEST.MISS]",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -25,6 +307,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -33,6 +316,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -43,6 +327,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -50,5 +335,545 @@
"PublicDescription": "Counts all retired store instructions.",
"SampleAfterValue": "1000003",
"UMask": "0x82"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Retired load instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Retired store instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "MEM_STORE_RETIRED.L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired memory uops for any access",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe5",
+ "EventName": "MEM_UOP_RETIRED.ANY",
+ "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cacheable and Non-Cacheable code read requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and Non-Cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2c",
+ "EventName": "SQ_MISC.BUS_LOCK",
+ "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
}
]
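
The OFFCORE_REQUESTS.DEMAND_DATA_RD description above suggests pairing it with OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD to determine the average demand-read latency in the uncore. As a minimal post-processing sketch (not part of the patch; the counts and the Python helper are illustrative), assuming the two counts have already been collected on a graniterapids machine:

#!/usr/bin/env python3
# Illustrative only: estimate average uncore demand data read latency from the two
# graniterapids events described above. The counts are made-up example values; in
# practice they would come from a perf run on the target machine.

outstanding = 1_200_000_000  # OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD (in-flight reads, summed per cycle)
requests = 40_000_000        # OFFCORE_REQUESTS.DEMAND_DATA_RD (demand reads sent to the uncore)

# Occupancy divided by throughput gives the average number of core cycles each
# request spent outstanding (Little's law applied to the super queue).
avg_latency_cycles = outstanding / requests
print(f"average demand data read latency: {avg_latency_cycles:.1f} core cycles")
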
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/counter.json b/tools/perf/pmu-events/arch/x86/graniterapids/counter.json
new file mode 100644
index 000000000000..250781a8ca64
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/counter.json
@@ -0,0 +1,77 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "B2CMI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "B2HOT",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "B2UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "B2CXL",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "CHACMS",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "MDF",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CXLCM",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 8
+ },
+ {
+ "Unit": "CXLDP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/floating-point.json b/tools/perf/pmu-events/arch/x86/graniterapids/floating-point.json
new file mode 100644
index 000000000000..59789eee060c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/floating-point.json
@@ -0,0 +1,242 @@
+[
+ {
+ "BriefDescription": "This event counts the cycles the floating point divider is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.SSE_AVX_MIX",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x60"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of all Scalar Half-Precision FP arithmetic instructions(1) retired - regular and complex.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.SCALAR",
+ "PublicDescription": "FP_ARITH_INST_RETIRED2.SCALAR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of all Vector (also called packed) Half-Precision FP arithmetic instructions(1) retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.VECTOR",
+ "PublicDescription": "FP_ARITH_INST_RETIRED2.VECTOR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1c"
+ }
+]
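
Each FP_ARITH_INST_RETIRED.* description above states how many computation operations one count represents (1 for scalar, 2/4 for 128-bit double/single, 4/8 for 256-bit, 8/16 for 512-bit). A small sketch, not part of the patch, of how those stated weights might be combined into a FLOP total; the counts are example values, and the half-precision FP_ARITH_INST_RETIRED2.* events are left out because their descriptions here do not state per-count operation counts:

#!/usr/bin/env python3
# Illustrative only: weight each event count by the operations-per-count figure
# given in its description. Per those descriptions, FMA-style instructions already
# count twice in these events, so no extra factor is applied for them.

counts = {  # made-up example values
    "FP_ARITH_INST_RETIRED.SCALAR": 5_000_000,
    "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 1_000_000,
    "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 500_000,
    "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 2_000_000,
    "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 1_500_000,
    "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE": 250_000,
    "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": 100_000,
}
ops_per_count = {
    "FP_ARITH_INST_RETIRED.SCALAR": 1,
    "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 2,
    "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 4,
    "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 4,
    "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 8,
    "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE": 8,
    "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": 16,
}
total_ops = sum(counts[name] * ops_per_count[name] for name in counts)
print(f"total FP operations: {total_ops}")
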
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json b/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
index c6d5016e7337..663c1a0e55a2 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
@@ -1,9 +1,474 @@
[
{
- "BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "BriefDescription": "Clears due to Unknown Branches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x87",
+ "EventName": "DECODE.MS_BUSY",
+ "SampleAfterValue": "500009",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired ANT branches",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_ANT",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x9",
+ "PEBS": "1",
+ "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted)",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x608006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x601006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x610006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x602006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x620006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x604006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "I-Cache miss too close to Code Prefetch Instruction",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATE_SWPF",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x9",
+ "PEBS": "1",
+ "PublicDescription": "Number of Instruction Cache demand miss in shadow of an on-going i-fetch cache-line triggered by PREFETCHIT0/1 instructions",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Mispredicted Retired ANT branches",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MISP_ANT",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x9",
+ "PEBS": "1",
+ "PublicDescription": "ANT retired branches that got just mispredicted",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MS_FLOWS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x17",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "ICACHE_DATA.STALL_PERIODS",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALL_PERIODS",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the number of uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
- "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.\nThe count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. The count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are delivered by the IDQ while the backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline while there were no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops was delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline while there were no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
"SampleAfterValue": "1000003",
"UMask": "0x1"
}
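
Note on usage: the descriptions above state that IDQ_BUBBLES.CORE serves as the numerator for the TMA Frontend Bound metric, with the Topdown Slots event as the denominator. A minimal usage sketch, assuming a Granite Rapids machine whose perf build already carries these event tables; the event names come from the EventName fields (perf accepts them in lower case) and the workload is only a placeholder:

    # Frontend Bound fraction ~= idq_bubbles.core / topdown.slots_p
    perf stat -e idq_bubbles.core,topdown.slots_p -- ./my_workload

Here topdown.slots_p is the general-counter form of the Topdown Slots event added later in this patch (pipeline.json).
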
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/memory.json b/tools/perf/pmu-events/arch/x86/graniterapids/memory.json
index 1c0e0e86e58e..38b74c6752c2 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/memory.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/memory.json
@@ -1,6 +1,85 @@
[
{
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "2",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected due to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "2",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "3",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "PublicDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "9",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "PublicDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "53",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -13,6 +92,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -24,7 +104,21 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "23",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -37,6 +131,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -49,6 +144,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -61,6 +157,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -73,6 +170,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -85,6 +183,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -97,6 +196,7 @@
},
{
"BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Counter": "0",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
@@ -106,7 +206,18 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -116,6 +227,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -124,51 +236,40 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an RTM execution aborted.",
- "EventCode": "0xc9",
- "EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Counts the number of times RTM abort was triggered.",
- "SampleAfterValue": "100003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Number of times an RTM execution successfully committed",
- "EventCode": "0xc9",
- "EventName": "RTM_RETIRED.COMMIT",
- "PublicDescription": "Counts the number of times RTM commit succeeded.",
- "SampleAfterValue": "100003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Number of times an RTM execution started.",
- "EventCode": "0xc9",
- "EventName": "RTM_RETIRED.START",
- "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC04477",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_CAPACITY_READ",
- "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
- "UMask": "0x80"
+ "UMask": "0x10"
},
{
- "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
- "SampleAfterValue": "100003",
- "UMask": "0x2"
+ "BriefDescription": "Cycles where data return is pending for a Demand Data Read request that missed the L3 cache.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Cycles with at least 1 Demand Data Read request that missed the L3 cache in the superQ.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
},
{
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
}
]
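
Note on usage: the new MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024 and GT_2048 entries follow the same scheme as the existing thresholds: MSRIndex 0x3F6 is the PEBS load-latency threshold register and MSRValue is the minimum latency in cycles (0x400 = 1024, 0x800 = 2048). A rough sketch of exercising such a threshold through perf's load-latency support, assuming an Intel PMU that exposes mem-loads and a placeholder workload:

    # sample only loads whose dispatch-to-completion latency exceeded 1024 cycles
    perf mem record --ldlat 1024 -- ./my_workload
    perf mem report
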
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/other.json b/tools/perf/pmu-events/arch/x86/graniterapids/other.json
index 5e799bae03ea..8b9b3c920934 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/other.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/other.json
@@ -1,6 +1,53 @@
[
{
+ "BriefDescription": "ASSISTS.PAGE_FAULT",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.PAGE_FAULT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the cycles where the AMX (Advanced Matrix Extensions) unit is busy performing an operation.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb7",
+ "EventName": "EXE.AMX_BUSY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -9,21 +56,112 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000001",
+ "MSRValue": "0x73C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC0002",
+ "MSRValue": "0x104000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10808",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the uncore cannot take further requests",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x2d",
+ "EventName": "XQ.FULL_CYCLES",
+ "PublicDescription": "Number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
}
]
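
Note on usage: all OCR.* entries above share EventCode 0x2A,0x2B and MSRIndex 0x1a6,0x1a7 because they are off-core response events; MSRValue is the request/response bit mask written to MSR_OFFCORE_RSP_0/1. Once these tables are built into perf, the symbolic names can be used directly; a small sketch with a placeholder workload:

    # demand data reads: any response vs. those served from DRAM
    perf stat -e ocr.demand_data_rd.any_response,ocr.demand_data_rd.dram -- ./my_workload
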
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json b/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json
index 764c0435d1d2..0ef9daf64e2e 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json
@@ -1,6 +1,35 @@
[
{
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1b"
+ },
+ {
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -8,7 +37,88 @@
"SampleAfterValue": "400009"
},
{
+ "BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -16,7 +126,222 @@
"SampleAfterValue": "400009"
},
{
+ "BriefDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x44"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x51"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x50"
+ },
+ {
+ "BriefDescription": "Number of conditional branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Mispredicted near indirect branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0xc0"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x60"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x48"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C01",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C02",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C0_WAIT",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x70"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -24,6 +349,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
@@ -32,6 +358,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -39,13 +366,150 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
"SampleAfterValue": "2000003"
},
{
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "8",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "16",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "12",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 or 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_3_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "5",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x75",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -54,6 +518,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -61,7 +526,186 @@
"SampleAfterValue": "2000003"
},
{
+ "BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.MACRO_FUSED",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "Counter": "Fixed counter 0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Iterations of Repeat string retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.REP_ITERATION",
+ "PEBS": "1",
+ "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "INT_MISC.MBA_STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.MBA_STALLS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.128BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x13"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.256BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xac"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_128",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_256",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.MUL_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.SHUFFLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_128",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked in the MOB due to false dependencies caused by a partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x88"
+ },
+ {
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -69,15 +713,127 @@
"UMask": "0x82"
},
{
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all load dispatches, other than software prefetches, that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions, so it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD (Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "LFENCE instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC2_RETIRED.LFENCE",
+ "PublicDescription": "Number of retired LFENCE instructions.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
- "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.\nThe count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
"SampleAfterValue": "10000003",
"UMask": "0x2"
},
{
+ "BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "Counter": "0",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BAD_SPEC_SLOTS",
+ "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "Counter": "0",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -85,6 +841,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -92,11 +849,259 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Number of non dec-by-all uops decoded by decoder",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x76",
+ "EventName": "UOPS_DECODED.DEC0_UOPS",
+ "PublicDescription": "This event counts the number of not dec-by-all uops decoded by decoder 0.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Number of uops dispatch to execution port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Number of uops dispatch to execution port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3_10",
+ "PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 5 and 11",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_5_11",
+ "PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Number of uops dispatch to execution port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "UOPS_ISSUED.CYCLES",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with retired uop(s).",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.CYCLES",
+ "PublicDescription": "Counts cycles where at least one uop has retired.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired uops except the last uop of each instruction.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.HEAVY",
+ "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "UOPS_RETIRED.MS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
- "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.\nSoftware can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric. Software can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
}
]
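
The TOPDOWN.* slots events and UOPS_RETIRED.SLOTS added above are documented as the numerators and denominator of the level-1 Top-down Microarchitecture Analysis (TMA) breakdown (TOPDOWN.SLOTS as denominator; TOPDOWN.BACKEND_BOUND_SLOTS, TOPDOWN.BAD_SPEC_SLOTS and UOPS_RETIRED.SLOTS as numerators). A minimal Python sketch of that arithmetic follows, assuming the four counts were already collected (for example with perf stat); the function name and the placeholder numbers are illustrative only, not part of the patch.

# Minimal sketch: derive the level-1 TMA fractions from raw event counts,
# following the numerator/denominator roles stated in the event descriptions.
def tma_level1(slots, backend_bound_slots, bad_spec_slots, retiring_slots):
    """slots               -- TOPDOWN.SLOTS (denominator)
    backend_bound_slots -- TOPDOWN.BACKEND_BOUND_SLOTS
    bad_spec_slots      -- TOPDOWN.BAD_SPEC_SLOTS
    retiring_slots      -- UOPS_RETIRED.SLOTS"""
    backend_bound = backend_bound_slots / slots
    bad_speculation = bad_spec_slots / slots
    retiring = retiring_slots / slots
    # Frontend Bound is the remainder of the issue slots at level 1.
    frontend_bound = 1.0 - (backend_bound + bad_speculation + retiring)
    return {
        "Retiring": retiring,
        "Bad Speculation": bad_speculation,
        "Backend Bound": backend_bound,
        "Frontend Bound": frontend_bound,
    }

if __name__ == "__main__":
    # Placeholder counts, e.g. from:
    #   perf stat -e TOPDOWN.SLOTS,TOPDOWN.BACKEND_BOUND_SLOTS,TOPDOWN.BAD_SPEC_SLOTS,UOPS_RETIRED.SLOTS -- <workload>
    print(tma_level1(slots=10_000_000,
                     backend_bound_slots=4_200_000,
                     bad_spec_slots=600_000,
                     retiring_slots=3_800_000))
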
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json
new file mode 100644
index 000000000000..e0a45d4ea848
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json
@@ -0,0 +1,3674 @@
+[
+ {
+ "BriefDescription": "Clockticks for CMS units attached to CHA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_CHACMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "PublicDescription": "UNC_CHACMS_CLOCKTICKS",
+ "Unit": "CHACMS"
+ },
+ {
+ "BriefDescription": "Number of CHA clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the uncore caching and home agent (CHA)",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR or IRQ (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_ANY",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in IRQ (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_TOR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: All Requests to Remotely Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All transactions from Remote Agents",
+ "UMask": "0x17e0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: CRd Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests",
+ "UMask": "0x1bd0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Read Requests and Read Prefetches",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1bc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Reads",
+ "UMask": "0x1fc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches",
+ "UMask": "0x841ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops which miss the Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Misses",
+ "UMask": "0x1fc101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: All Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed locally",
+ "UMask": "0xbdfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Code Read Requests and Code Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests",
+ "UMask": "0x19d0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Read Requests and Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x19c1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Code Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests",
+ "UMask": "0x1850ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1841ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: RFO Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests",
+ "UMask": "0x1848ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: LLC Prefetch Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x189dff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: All Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x199dff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Code Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests",
+ "UMask": "0x1910ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1981ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests",
+ "UMask": "0x1908ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests",
+ "UMask": "0x19c8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: All Requests to Remotely Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
+ "UMask": "0x15dfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Code Read/Prefetch Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_CODE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests",
+ "UMask": "0x1a10ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Data Read/Prefetch Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_DATA_RD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1a01ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: RFO Requests/Prefetches from a Remote Socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_RFO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests",
+ "UMask": "0x1a08ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Snoop Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed",
+ "UMask": "0x1c19ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: All RFO and RFO Prefetches",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All RFOs - Demand and Prefetches",
+ "UMask": "0x1bc8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches",
+ "UMask": "0x9c8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Writes to Locally Homed Memory (includes writebacks from L1/L2)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Writes",
+ "UMask": "0x842ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups: Writes to Remotely Homed Memory (includes writebacks from L1/L2)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remote Writes",
+ "UMask": "0x17c2ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : All Lines Victimized",
+ "UMask": "0xf",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - All Lines",
+ "UMask": "0x200f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in E State",
+ "UMask": "0x2002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in F State",
+ "UMask": "0x2008",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in M State",
+ "UMask": "0x2001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in S State",
+ "UMask": "0x2004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote - All Lines",
+ "UMask": "0x800f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote - Lines in E State",
+ "UMask": "0x8002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote - Lines in M State",
+ "UMask": "0x8001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote - Lines in S State",
+ "UMask": "0x8004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in E state",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in M state",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in S State",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : RFO HitS",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.ALLOC_EXCLUSIVE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.ALLOC_EXCLUSIVE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.ALLOC_SHARED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.ALLOC_SHARED",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.DEALLOC_EVCTCLN",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.DEALLOC_EVCTCLN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.DIRBACKED_ONLY",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.DIRBACKED_ONLY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.HIT_EXCLUSIVE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.HIT_EXCLUSIVE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.HIT_SHARED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.HIT_SHARED",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.INCLUSIVE_ONLY",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.INCLUSIVE_ONLY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.MISS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.UPDATE_EXCLUSIVE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.UPDATE_EXCLUSIVE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.UPDATE_SHARED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.UPDATE_SHARED",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.VICTIM_EXCLUSIVE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.VICTIM_EXCLUSIVE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_REMOTE_SF.VICTIM_SHARED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_CHA_REMOTE_SF.VICTIM_SHARED",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "HA Read and Write Requests : InvalItoE",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "HA Read and Write Requests : Reads",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "HA Read and Write Requests : Writes",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All TOR Inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All",
+ "UMask": "0xc001ffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CLFlush transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_CLFLUSH",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c8c7fd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FsRdCur transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_FSRDCUR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c8effd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FsRdCurPtl transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_FSRDCURPTL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c9effd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoM transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_ITOM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc47fd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMWr transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_ITOMWR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc4ffd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "MemPushWr transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_MEMPUSHWR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc6ffd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCiL transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WCIL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c86ffd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WcilF transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WCILF",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c867fd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WiL transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WIL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c87ffd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CLFlush transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_CLFLUSH",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c8c7fe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FsRdCur transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_FSRDCUR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c8effe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FsRdCurPtl transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_FSRDCURPTL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c9effe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoM transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_ITOM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc47fe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMWr transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_ITOMWR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc4ffe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "MemPushWr transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_MEMPUSHWR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc6ffe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCiL transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WCIL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c86ffe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WcilF transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WCILF",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c867fe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WiL transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WIL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c87ffe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All locally initiated requests from IA Cores",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CLFlush events that are initiated from the Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CLFlushes issued by iA Cores",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRDs issued by iA Cores",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Code read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores",
+ "UMask": "0xc817ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd PTEs issued by iA Cores due to a page walk",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRdPte issued by iA Cores due to a page walk",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores",
+ "UMask": "0xc897ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All locally initiated requests from IA Cores which hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Code read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Code read prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "UMask": "0xc817fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd PTEs issued by iA Cores due to page walks that hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "UMask": "0xc897fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoM requests from local IA cores that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "UMask": "0xcc47fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch code read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch data read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch read for ownership from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "UMask": "0xccc7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoM events that are initiated from the Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores",
+ "UMask": "0xcc47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMCacheNear requests from local IA cores",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "UMask": "0xcd47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch code read from local IA.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores",
+ "UMask": "0xcccfff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch data read from local IA.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores",
+ "UMask": "0xccd7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores",
+ "UMask": "0xccc7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All locally initiated requests from IA Cores which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CRDs from local IA cores to locally homed memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc80efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Code read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CRD Prefetches from local IA cores to locally homed memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc88efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CRD Prefetches from local IA cores to remotely homed memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc88f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CRDs from local IA cores to remotely homed memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc80f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "UMask": "0xc817fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd PTEs issued by iA Cores due to a page walk that missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC",
+ "PerPkg": "1",
+ "PublicDescription": "DRds issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "UMask": "0x10c8178201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "UMask": "0xc8178601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc816fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "UMask": "0xc8168601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds from local IA cores to locally homed PMM addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "UMask": "0xc8168a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "UMask": "0xc8178a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc897fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8978201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd Prefetches from local IA cores to DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "UMask": "0xc8978601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF, and target local memory",
+ "UMask": "0xc896fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd Prefetches from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "UMask": "0xc8968601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd Prefetches from local IA cores to locally homed PMM addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "UMask": "0xc8968a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd Prefetches from local IA cores to PMM addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "UMask": "0xc8978a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF, and target remote memory",
+ "UMask": "0xc8977e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd Prefetches from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8970601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRd Prefetches from local IA cores to remotely homed PMM addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8970a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8177e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8170601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds from local IA cores to remotely homed PMM addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8170a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoM requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "UMask": "0xcc47fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC",
+ "UMask": "0xcccffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch data read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC",
+ "UMask": "0xccd7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccd78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "UMask": "0xccc7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8878201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCILF requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCILF requests from local IA cores to locally homed PMM addresses which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCIL requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCIL requests from local IA cores to locally homed PMM addresses which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCILF requests from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCILF requests from local IA cores to remotely homed PMM addresses which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCIL requests from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCIL requests from local IA cores to remotely homed PMM addresses which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8078201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc806fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccc78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc886fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8877e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8077e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UCRDF requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCIL requests from a local IA core that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCILF requests from local IA core that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCILF requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCILF requests from local IA cores to PMM homed addresses which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCIL requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCIL requests from a local IA core to PMM homed addresses that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WIL requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "SpecItoM events that are initiated from the Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores",
+ "UMask": "0xcc57ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbEFtoEs issued by iA Cores. (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "UMask": "0xcc3fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "UMask": "0xcc37ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbMtoEs issued by iA Cores . (Modified Write Backs)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "UMask": "0xcc2fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbMtoI requests from local IA cores",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WbMtoIs issued by iA Cores",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbStoIs issued by iA Cores . (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "UMask": "0xcc67ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCIL requests from a local IA core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WCILF requests from local IA core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All TOR inserts from local IO devices",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CLFlush requests from IO devices",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All TOR inserts from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMs from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIRDCURs issued by IO devices which hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RFOs from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All TOR ItoM inserts from local IO devices",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that address memory on the local socket",
+ "UMask": "0xcd42ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that address memory on a remote socket",
+ "UMask": "0xcd437f04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoM, indicating a write request, from IO Devices that address memory on the local socket",
+ "UMask": "0xcc42ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoM, indicating a write request, from IO Devices that address memory on a remote socket",
+ "UMask": "0xcc437f04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All TOR inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All TOR ItoM inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIRDCURs issued by IO devices which miss the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All TOR RFO inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIRDCURs issued by IO devices",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that addresses memory on the local socket",
+ "UMask": "0xc8f2ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that addresses memory on a remote socket",
+ "UMask": "0xc8f37f04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RFOs from local IO devices",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBMtoI requests from IO devices",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for SF or LLC Evictions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LLC_OR_SF_EVICTIONS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0xc001ff02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All locally initiated requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA and IO",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All from Local iA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local IO",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All remote requests (e.g. snoops, writebacks) that came from remote sockets",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All Remote Requests",
+ "UMask": "0xc001ffc8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All snoops to this LLC that came from remote sockets",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All Snoops from Remote",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Occupancy for all TOR entries",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All",
+ "UMask": "0xc001ffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CLFlush transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_CLFLUSH",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c8c7fd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for FsRdCur transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_FSRDCUR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c8effd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for FsRdCurPtl transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_FSRDCURPTL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c9effd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoM transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_ITOM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc47fd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMWr transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_ITOMWR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc4ffd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for MemPushWr transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_MEMPUSHWR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc6ffd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCiL transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WCIL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c86ffd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WcilF transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WCILF",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c867fd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WiL transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WIL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c87ffd20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CLFlush transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_CLFLUSH",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c8c7fe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for FsRdCur transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_FSRDCUR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c8effe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for FsRdCurPtl transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_FSRDCURPTL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c9effe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoM transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_ITOM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc47fe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMWr transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_ITOMWR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc4ffe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for MemPushWr transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_MEMPUSHWR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78cc6ffe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCiL transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WCIL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c86ffe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WcilF transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WCILF",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c867fe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WiL transactions from a CXL device which miss the L3.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WIL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x78c87ffe20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CLFlush events that are initiated from the Core",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRDs issued by iA Cores",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores",
+ "UMask": "0xc817ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd PTEs issued by iA Cores due to a page walk",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores",
+ "UMask": "0xc897ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Code read from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC",
+ "UMask": "0xc817fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd PTEs issued by iA Cores due to page walks that hit the LLC",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "UMask": "0xc897fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "UMask": "0xcc47fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "UMask": "0xccc7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoM events that are initiated from the Core",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "UMask": "0xcc47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMCacheNear requests from local IA cores",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "UMask": "0xcd47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores",
+ "UMask": "0xcccfff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores",
+ "UMask": "0xccd7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores",
+ "UMask": "0xccc7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CRDs from local IA cores to locally homed memory",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc80efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to locally homed memory",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc88efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to remotely homed memory",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc88f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CRDs from local IA cores to remotely homed memory",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc80f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "UMask": "0xc817fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd PTEs issued by iA Cores due to a page walk that missed the LLC",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8178201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "UMask": "0xc8178601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc816fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "UMask": "0xc8168601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds from local IA cores to locally homed PMM addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "UMask": "0xc8168a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "UMask": "0xc8178a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc897fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8978201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "UMask": "0xc8978601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc896fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "UMask": "0xc8968601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to locally homed PMM addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "UMask": "0xc8968a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to PMM addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "UMask": "0xc8978a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8977e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8970601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd Prefetches from local IA cores to remotely homed PMM addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8970a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Data read from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8177e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8170601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds from local IA cores to remotely homed PMM addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8170a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "UMask": "0xcc47fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC",
+ "UMask": "0xcccffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC",
+ "UMask": "0xccd7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccd78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "UMask": "0xccc7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8878201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed PMM addresses which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed PMM addresses which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to remotely homed PMM addresses which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to remotely homed PMM addresses which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8078201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc806fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccc78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "UMask": "0xc886fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8877e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "UMask": "0xc8077e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for UCRDF requests from local IA cores that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCILF requests from local IA core that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to PMM homed addresses which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core to PMM homed addresses that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WIL requests from local IA cores that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for SpecItoM events that are initiated from the Core",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "UMask": "0xcc57ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WbMtoI requests from local IA cores",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCIL requests from a local IA core",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WCILF requests from local IA core",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CLFlush requests from IO devices",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMs from local IO devices which hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which hit the LLC",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for RFOs from local IO devices which hit the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMCacheNear transactions from an IO device on the local socket that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_LOCAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "UMask": "0xcd42fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoMCacheNear transactions from an IO device on a remote socket that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_REMOTE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "UMask": "0xcd437e04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoM transactions from an IO device on the local socket that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_LOCAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "UMask": "0xcc42fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for ItoM transactions from an IO device on a remote socket that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_REMOTE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "UMask": "0xcc437e04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which miss the LLC",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for PCIRDCUR transactions from an IO device on the local socket that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_LOCAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "UMask": "0xc8f2fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for PCIRDCUR transactions from an IO device on a remote socket that miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_REMOTE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "UMask": "0xc8f37e04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All TOR RFO inserts from local IO devices which miss the cache",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for RFOs from local IO devices",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for WBMtoI requests from IO devices",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All locally initiated requests",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA and IO",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All from Local iA",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All from Local IO",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local IO",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All remote requests (e.g. snoops, writebacks) that came from remote sockets",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All Remote Requests",
+ "UMask": "0xc001ffc8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for All snoops to this LLC that came from remote sockets",
+ "Counter": "0",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All Snoops from Remote",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cxl.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cxl.json
new file mode 100644
index 000000000000..383a5ba5a697
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cxl.json
@@ -0,0 +1,31 @@
+[
+ {
+ "BriefDescription": "B2CXL Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_B2CXL_CLOCKTICKS",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "Unit": "B2CXL"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "Counter": "4,5,6,7",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_DATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to M2S Data AGF",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_DATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLDP"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json
new file mode 100644
index 000000000000..856ee985ecd4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json
@@ -0,0 +1,1849 @@
+[
+ {
+ "BriefDescription": "Clockticks of the mesh to memory (B2CMI)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_B2CMI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of time D2C was not honoured by egress due to directory state constraints",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_B2CMI_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of times B2CMI egress did D2C (direct to core)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_B2CMI_DIRECT2CORE_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of times D2C wasn't honoured even though the incoming request had d2c set for non cisgress txn",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_B2CMI_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of d2k wasn't done due to credit constraints",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Direct to UPI Transactions - Ignored due to lack of credits : All : Counts the number of d2k wasn't done due to credit constraints",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_CREDITS.EGRESS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of time D2K was not honoured by egress due to directory state constraints",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U : Counts the number of time D2K was not honoured by egress due to directory state constraints",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of times egress did D2K (Direct to KTI)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_B2CMI_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of times D2K wasn't honoured even though the incoming request had d2k set for non cisgress txn",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_B2CMI_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Hit Clean",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x38",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Hit Dirty (modified)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with any directory to non persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory A to non persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory I to non persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory S to non persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory S to non persistent memory",
+ "UMask": "0x4",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Miss Clean",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x38",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Miss Dirty (modified)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Any A2I Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "UMask": "0x320",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Any A2S Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts cisgress directory updates",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory (DRAM)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_ANY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x101",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update in near memory to the A state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2A",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x114",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update in near memory to the I state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2I",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x128",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update in near memory to the S state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x142",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Any I2A Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Any I2S Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update in far memory to the A state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2A",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x214",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update in far memory to the I state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2I",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x228",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update in far memory to the S state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x242",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Any S2A Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.S2A",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Any S2I Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.S2I",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update to the A state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2A",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x314",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update to the I state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2I",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x328",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Directory update to the S state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2S",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x342",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts any read",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_B2CMI_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts normal reads issue to CMI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_B2CMI_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x101",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Count reads to NM region",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_CACHE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x110",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts reads to 1lm non persistent memory regions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_MEM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "All Writes - All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_B2CMI_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x110",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Full Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_B2CMI_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x101",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_B2CMI_IMC_WRITES.NI",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_B2CMI_IMC_WRITES.NI_MISS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_B2CMI_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x102",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "DDR, acting as Cache - All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_CACHE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x140",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "DDR - All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_MEM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x120",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_UPI",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_B2CMI_PREFCAM_INSERTS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT -All Channels",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_B2CMI_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "PublicDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_B2CMI_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm reads and WRNI which were a hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_B2CMI_TAG_HIT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm reads which were a hit clean",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_B2CMI_TAG_HIT.RD_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm reads which were a hit dirty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_B2CMI_TAG_HIT.RD_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm WRNI which were a hit clean",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_B2CMI_TAG_HIT.WR_CLEAN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm WRNI which were a hit dirty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_B2CMI_TAG_HIT.WR_DIRTY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm second way read miss for a WrNI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_B2CMI_TAG_MISS.CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm second way read miss for a WrNI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_B2CMI_TAG_MISS.DIRTY",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm second way read miss for a Rd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_B2CMI_TAG_MISS.RD_2WAY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm reads which were a miss and the cache line is unmodified",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_B2CMI_TAG_MISS.RD_CLEAN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm reads which were a miss and the cache line is modified",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_B2CMI_TAG_MISS.RD_DIRTY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm second way read miss for a WrNI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_B2CMI_TAG_MISS.WR_2WAY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm WRNI which were a miss and the cache line is unmodified",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_B2CMI_TAG_MISS.WR_CLEAN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Counts the 2lm WRNI which were a miss and the cache line is modified",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_B2CMI_TAG_MISS.WR_DIRTY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_B2CMI_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_B2CMI_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_B2CMI_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "B2CMI"
+ },
+ {
+ "BriefDescription": "UNC_B2HOT_CLOCKTICKS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_B2HOT_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks for the B2HOT unit",
+ "UMask": "0x1",
+ "Unit": "B2HOT"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_B2UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "B2UPI"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy : Mem",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "IRP Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "MDF Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_MDF_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of packets bypassing the ingress queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_MDF_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of packets bypassing the ingress queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_MDF_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of packets bypassing the ingress queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_MDF_RxR_BYPASS.AK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of packets bypassing the ingress queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_MDF_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of packets bypassing the ingress queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_MDF_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of packets bypassing the ingress queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_MDF_RxR_BYPASS.IV",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (AD_BNC)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_MDF_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (AD)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_MDF_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (AK)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_MDF_RxR_INSERTS.AK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (BL_BNC)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_MDF_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (BL_CRD)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_MDF_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the Ingress used to queue up requests from the mesh (IV)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_MDF_RxR_INSERTS.IV",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Occupancy counts for the Ingress buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_MDF_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Occupancy counts for the Ingress buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_MDF_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Occupancy counts for the Ingress buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_MDF_RxR_OCCUPANCY.AK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Occupancy counts for the Ingress buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_MDF_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Occupancy counts for the Ingress buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_MDF_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Occupancy counts for the Ingress buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_MDF_RxR_OCCUPANCY.IV",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress bypasses for for AD_BNC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_MDF_TxR_BYPASS.AD_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress bypasses for for AD_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_MDF_TxR_BYPASS.AD_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress bypasses for for AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_MDF_TxR_BYPASS.AK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress bypasses for for BL_BNC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_MDF_TxR_BYPASS.BL_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress bypasses for for BL_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_MDF_TxR_BYPASS.BL_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress bypasses for for IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_MDF_TxR_BYPASS.IV",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of egress inserts for for AD_BNC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_MDF_TxR_INSERTS.AD_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of egress inserts for for AD_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_MDF_TxR_INSERTS.AD_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of egress inserts for for AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_MDF_TxR_INSERTS.AK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of egress inserts for for BL_BNC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_MDF_TxR_INSERTS.BL_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of egress inserts for for BL_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_MDF_TxR_INSERTS.BL_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of egress inserts for for IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_MDF_TxR_INSERTS.IV",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress occupancy for for AD_BNC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_MDF_TxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress occupancy for for AD_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_MDF_TxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress occupancy for for AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_MDF_TxR_OCCUPANCY.AK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress occupancy for for BL_BNC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_MDF_TxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress occupancy for for BL_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_MDF_TxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Egress occupancy for for IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_MDF_TxR_OCCUPANCY.IV",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of UPI LL clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of kfclks",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L1 : Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1aa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x12a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10c",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x109",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10d",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1aa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x12a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10c",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x109",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10d",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Data : Counts number of data flits across this UPI link.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "All Null Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Idle",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Null FLITs transmitted to any slot",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ }
+]
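Several of the UPI event descriptions in this file explain how their counts are meant to be combined: the flit-buffer insert and occupancy pairs (UNC_UPI_RxL_INSERTS/UNC_UPI_RxL_OCCUPANCY and UNC_UPI_TxL_INSERTS/UNC_UPI_TxL_OCCUPANCY) can be divided to estimate average buffer lifetime, and UNC_UPI_TxL_FLITS.ALL_DATA is the usual input for UPI data-bandwidth estimates. A minimal post-processing sketch in Python, assuming the raw counts were already collected separately (e.g. with perf stat on the uncore_upi PMUs); the 64/9 bytes-per-data-flit scaling mirrors the convention used by Intel's published UPI bandwidth metrics, and every number below is a placeholder, not measured data:

    # Post-processing sketch for UPI uncore counts collected elsewhere, e.g.:
    #   perf stat -a -e uncore_upi_0/event=0x2,umask=0xf/ -- sleep 1
    # All values in __main__ are placeholders for illustration only.

    BYTES_PER_DATA_FLIT = 64 / 9  # scaling used by Intel's UPI data-bandwidth metrics

    def avg_buffer_lifetime_cycles(occupancy: int, inserts: int) -> float:
        """Average flit-buffer residency in cycles: accumulated occupancy
        divided by the number of allocations, as the event descriptions
        above suggest (Little's law)."""
        return occupancy / inserts if inserts else 0.0

    def upi_data_bytes(all_data_flits: int) -> float:
        """Approximate payload bytes implied by UNC_UPI_TxL_FLITS.ALL_DATA."""
        return all_data_flits * BYTES_PER_DATA_FLIT

    if __name__ == "__main__":
        txl_occupancy = 1_200_000   # UNC_UPI_TxL_OCCUPANCY (placeholder)
        txl_inserts = 300_000       # UNC_UPI_TxL_INSERTS (placeholder)
        all_data = 45_000_000       # UNC_UPI_TxL_FLITS.ALL_DATA (placeholder)
        elapsed_s = 1.0             # measurement interval in seconds

        print(f"avg TxQ lifetime: "
              f"{avg_buffer_lifetime_cycles(txl_occupancy, txl_inserts):.1f} cycles")
        print(f"UPI data TX bandwidth: "
              f"{upi_data_bytes(all_data) / elapsed_s / 1e6:.1f} MB/s")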
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json
new file mode 100644
index 000000000000..cffb9d94b53d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json
@@ -0,0 +1,1901 @@
+[
+ {
+ "BriefDescription": "IIO Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff0ff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040040",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080080",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Counts once for every 4 bytes read from this card to memory. This event does include reads to IO.",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Counts once for every 4 bytes written from this card to memory. This event does include writes to IO.",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x7002001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x7004001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x7008001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x7010001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x7020001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x7040001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x7080001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Counts once for every 4 bytes written from this card to a peer device's IO space.",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.ALL_PARTS",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB Hits to a 1G Page",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB Hits to a 2M Page",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB Hits to a 4K Page",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB lookups all",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Context cache hits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Context cache lookups",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB lookups first",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.MISSES",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOMMU memory access (both low and high priority)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0xc0",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOMMU high priority memory access",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES_HIGH",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOMMU low priority memory access",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES_LOW",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Second Level Page Walk Cache Hit to a 1G page",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Second Level Page Walk Cache Hit to a 256T page",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Second Level Page Walk Cache Hit to a 2M page",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_2M_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Second Level Page Walk Cache Hit to a 512G page",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Second Level Page Walk Cache fill",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_CACHE_FILLS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Second Level Page Walk Cache lookup",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_CACHE_LOOKUPS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Cycles PWT full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.CYC_PWT_FULL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Interrupt Entry cache hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Interrupt Entry cache lookup",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Context Cache invalidation events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_CTXT_CACHE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Interrupt Entry Cache invalidation events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_INT_CACHE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB invalidation events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_IOTLB",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PASID Cache invalidation events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PASID_CACHE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing",
+ "Counter": "2,3",
+ "EventCode": "0xc5",
+ "EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Passing data to be written",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x700f020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x700f008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x700f002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Issuing to IOMMU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x700f001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x700f004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Writing line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x700f010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "-",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff080",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "-",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff040",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "-",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "-",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "-",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "-",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "-",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "-",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "All 9 bits of Page Walk Tracker Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x70ff002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x001",
+ "UMask": "0x7001002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x002",
+ "UMask": "0x7002002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x004",
+ "UMask": "0x7004002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x008",
+ "UMask": "0x7008002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x010",
+ "UMask": "0x7010002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x020",
+ "UMask": "0x7020002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x040",
+ "UMask": "0x7040002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x080",
+ "UMask": "0x7080002",
+ "Unit": "IIO"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json
new file mode 100644
index 000000000000..08e410b9b0a2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json
@@ -0,0 +1,449 @@
+[
+ {
+ "BriefDescription": "DRAM Activate Count : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xf7",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xf1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : Underfill Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_ACT_COUNT.UFILL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xf4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : Write transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xf2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0, all CAS operations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0, all reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD",
+ "PerPkg": "1",
+ "UMask": "0xcf",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 regular reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0xc1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0, all writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.WR",
+ "PerPkg": "1",
+ "UMask": "0xf0",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 regular writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.WR_NONPRE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xd0",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 auto-precharge writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.WR_PRE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xe0",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1, all CAS operations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1, all reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD",
+ "PerPkg": "1",
+ "UMask": "0xcf",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 regular reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0xc1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1, all writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.WR",
+ "PerPkg": "1",
+ "UMask": "0xf0",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 regular writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.WR_NONPRE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xd0",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 auto-precharge writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.WR_PRE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xe0",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM DCLK clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Clockticks",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM HCLK clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Clockticks",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to (?) : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "UMask": "0xf8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xf1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xf4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xf2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read buffer inserts on subchannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS.SCH0",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read buffer inserts on subchannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS.SCH1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read buffer occupancy on subchannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M_RDB_OCCUPANCY_SCH0",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read buffer occupancy on subchannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M_RDB_OCCUPANCY_SCH1",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH0",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH0",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH1",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH0",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH1",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH0",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH0",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH1",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH0",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x87",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH1",
+ "PerPkg": "1",
+ "Unit": "IMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-power.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-power.json
new file mode 100644
index 000000000000..02e59f64a544
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-power.json
@@ -0,0 +1,11 @@
+[
+ {
+ "BriefDescription": "PCU Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "PCU Clockticks: The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
+ "Unit": "PCU"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json b/tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json
index 8784c97b7534..609a9549cbf3 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json
@@ -1,6 +1,26 @@
[
{
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -8,7 +28,63 @@
"UMask": "0xe"
},
{
+ "BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -16,11 +92,94 @@
"UMask": "0xe"
},
{
+ "BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
index 0831f14b3cc6..29b408d036c2 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L1D miss outstanding duration in cycles",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -34,6 +38,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
"SampleAfterValue": "2000003",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_DEMAND_RQSTS.WB_HIT",
"PublicDescription": "Not rejected writebacks that hit L2 cache.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"PublicDescription": "L2 cache lines in E state filling L2.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"PublicDescription": "L2 cache lines in I state filling L2.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"PublicDescription": "L2 cache lines in S state filling L2.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"PublicDescription": "Clean L2 cache lines evicted by demand.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"PublicDescription": "Dirty L2 cache lines evicted by demand.",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts all L2 code requests.",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "Counts all L2 HW prefetcher requests.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts all L2 store RFO requests.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Number of instruction fetches that missed the L2 cache.",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
@@ -181,6 +203,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
@@ -190,6 +213,7 @@
},
{
"BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_HIT",
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
@@ -198,6 +222,7 @@
},
{
"BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_MISS",
"PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
@@ -206,6 +231,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
@@ -215,6 +241,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
@@ -224,6 +251,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
@@ -232,6 +260,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.ALL_PF",
"PublicDescription": "Any MLC or L3 HW prefetch accessing L2, including rejects.",
@@ -248,6 +278,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"PublicDescription": "Transactions accessing L2 pipe.",
@@ -256,6 +287,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.CODE_RD",
"PublicDescription": "L2 cache accesses when fetching instructions.",
@@ -264,6 +296,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"PublicDescription": "Demand data read requests that access L2 cache.",
@@ -272,6 +305,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.L1D_WB",
"PublicDescription": "L1D writebacks that access L2 cache.",
@@ -280,6 +314,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.L2_FILL",
"PublicDescription": "L2 fill requests that access L2 cache.",
@@ -288,6 +323,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "L2 writebacks that access L2 cache.",
@@ -296,6 +332,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.RFO",
"PublicDescription": "RFO requests that access L2 cache.",
@@ -304,6 +341,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D is locked.",
@@ -312,6 +350,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
@@ -320,6 +359,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
@@ -328,6 +368,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD2",
@@ -338,6 +379,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD2",
@@ -348,6 +390,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD2",
@@ -358,6 +401,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD2",
@@ -368,6 +412,7 @@
},
{
"BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM30",
"EventCode": "0xD3",
@@ -379,6 +424,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSM30",
"EventCode": "0xD1",
@@ -389,6 +435,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD1",
@@ -399,6 +446,7 @@
},
{
"BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSM30",
"EventCode": "0xD1",
@@ -410,6 +458,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD76, HSD29, HSM30",
"EventCode": "0xD1",
@@ -420,6 +469,7 @@
},
{
"BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD1",
@@ -431,6 +481,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD1",
@@ -442,6 +493,7 @@
},
{
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD1",
@@ -453,6 +505,7 @@
},
{
"BriefDescription": "Retired load uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -464,6 +517,7 @@
},
{
"BriefDescription": "Retired store uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -475,6 +529,7 @@
},
{
"BriefDescription": "Retired load uops with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD76, HSD29, HSM30",
"EventCode": "0xD0",
@@ -485,6 +540,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -495,6 +551,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -505,6 +562,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -515,6 +573,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -525,6 +584,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
@@ -533,6 +593,7 @@
},
{
"BriefDescription": "Cacheable and noncacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Demand code read requests sent to uncore.",
@@ -541,6 +602,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
@@ -550,6 +612,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
@@ -558,6 +621,7 @@
},
{
"BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"SampleAfterValue": "2000003",
@@ -565,6 +629,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
@@ -574,6 +639,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
@@ -583,6 +649,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
"EventCode": "0x60",
@@ -592,6 +659,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
@@ -601,6 +669,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
@@ -610,6 +679,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
@@ -619,6 +689,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
"EventCode": "0x60",
@@ -628,6 +699,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
@@ -637,6 +709,7 @@
},
{
"BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
@@ -644,6 +717,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -653,6 +727,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -662,6 +737,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -671,6 +747,7 @@
},
{
"BriefDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -680,6 +757,7 @@
},
{
"BriefDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -689,6 +767,7 @@
},
{
"BriefDescription": "Counts all requests hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -698,6 +777,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -707,6 +787,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -716,6 +797,7 @@
},
{
"BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -725,6 +807,7 @@
},
{
"BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -734,6 +817,7 @@
},
{
"BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -743,6 +827,7 @@
},
{
"BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -752,6 +837,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -761,6 +847,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -770,6 +857,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -779,6 +867,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -788,6 +877,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -797,6 +887,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -806,6 +897,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -815,6 +907,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -824,6 +917,7 @@
},
{
"BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/counter.json b/tools/perf/pmu-events/arch/x86/haswell/counter.json
new file mode 100644
index 000000000000..1be6522e2bbc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/counter.json
@@ -0,0 +1,22 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "ARB",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "cbox_0",
+ "CountersNumFixed": 1,
+ "CountersNumGeneric": "0"
+ }
+]
\ No newline at end of file
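The new counter.json advertises, per PMU unit, how many fixed and generic counters Haswell exposes, while the per-event "Counter" fields added in the hunks above list which generic counters each event may be programmed on. A minimal sketch of cross-checking the two (hypothetical file paths and helper names, not the perf jevents tooling):

    import json

    # Hypothetical paths; in the kernel tree these files live under
    # tools/perf/pmu-events/arch/x86/haswell/.
    COUNTER_JSON = "counter.json"
    EVENT_JSON = "cache.json"

    def core_generic_counters(counter_file):
        """Return the number of generic core counters advertised by counter.json."""
        with open(counter_file) as f:
            units = json.load(f)
        for unit in units:
            if unit["Unit"] == "core":
                return int(unit["CountersNumGeneric"])
        return 0

    def check_events(event_file, num_generic):
        """Warn about events whose "Counter" list names a counter the PMU lacks."""
        with open(event_file) as f:
            events = json.load(f)
        for ev in events:
            counters = {int(c) for c in ev.get("Counter", "").split(",") if c}
            bad = {c for c in counters if c >= num_generic}
            if bad:
                print(f'{ev["EventName"]}: counters {sorted(bad)} not available')

    if __name__ == "__main__":
        check_events(EVENT_JSON, core_generic_counters(COUNTER_JSON))

This is only an illustration of how the two new pieces of metadata relate; perf's own validation lives in its pmu-events build scripts.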
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
index 8fcc10f74ad9..a0b917306887 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "AVX_INSTS.ALL",
"PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"PublicDescription": "Number of SIMD FP assists due to input values.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"PublicDescription": "Number of SIMD FP assists due to output values.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "Number of X87 FP assists due to input values.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"PublicDescription": "Number of X87 FP assists due to output values.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"PublicDescription": "Number of SIMD move elimination candidate uops that were eliminated.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"PublicDescription": "Number of SIMD move elimination candidate uops that were not eliminated.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "HSD56, HSM57",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "HSD56, HSM57",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/frontend.json b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
index 73d6d681dfa7..a9f81fd17925 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFDATA_STALL",
"SampleAfterValue": "2000003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFETCH_STALL",
"SampleAfterValue": "2000003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts Instruction Cache (ICACHE) misses.",
@@ -45,6 +51,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -54,6 +61,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -63,6 +71,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
"Errata": "HSD135",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"PublicDescription": "Number of uops delivered to IDQ from any path.",
@@ -114,6 +128,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
@@ -130,6 +146,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -181,6 +203,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
@@ -189,6 +212,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"Errata": "HSD135",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
@@ -198,6 +222,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"Errata": "HSD135",
"EventCode": "0x9C",
@@ -208,6 +233,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD135",
"EventCode": "0x9C",
@@ -218,6 +244,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"Errata": "HSD135",
"EventCode": "0x9C",
@@ -227,6 +254,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"Errata": "HSD135",
"EventCode": "0x9C",
@@ -236,6 +264,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD135",
"EventCode": "0x9C",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
index 5631018ed388..b693c0b0cafe 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -90,7 +90,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -100,7 +100,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -121,7 +121,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -151,7 +151,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -172,7 +172,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -181,7 +181,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "10 * ARITH.DIVIDER_UOPS / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
@@ -218,7 +218,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -227,7 +227,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
@@ -236,7 +236,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -246,7 +246,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.REQUEST_FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -275,7 +275,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -295,7 +295,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
@@ -388,12 +388,12 @@
"MetricThreshold": "tma_info_inst_mix_ipstore < 8"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -414,7 +414,7 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
@@ -426,7 +426,7 @@
"MetricName": "tma_info_memory_l1mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
@@ -438,7 +438,13 @@
"MetricName": "tma_info_memory_l2mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
@@ -503,13 +509,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -590,7 +596,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -599,7 +605,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -617,7 +623,7 @@
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
@@ -637,7 +643,7 @@
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
@@ -686,7 +692,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -696,7 +702,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -705,7 +711,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
@@ -861,7 +867,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -869,7 +875,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -898,7 +904,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -926,7 +932,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
index 6ba0ea6e3fa6..edb1b5b9f553 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
"SampleAfterValue": "2000003",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
"SampleAfterValue": "2000003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
"SampleAfterValue": "2000003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "Counter": "0,1,2,3",
"Errata": "HSD65",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC4",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts).",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Number of times an HLE execution successfully committed.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.COMMIT",
"SampleAfterValue": "2000003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.START",
"SampleAfterValue": "2000003",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of memory ordering machine clears detected. Memory ordering machine clears can result from memory address aliasing or snoops from another hardware thread or core to data inflight in the pipeline. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -68,6 +77,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 128.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 16.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -92,6 +103,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 256.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -104,6 +116,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 32.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 4.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -128,6 +142,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 512.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -140,6 +155,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 64.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -152,6 +168,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 8.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -164,6 +181,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
@@ -172,6 +190,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"PublicDescription": "Speculative cache-line split store-address uops dispatched to L1D.",
@@ -180,6 +199,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -189,6 +209,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -198,6 +219,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -207,6 +229,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -216,6 +239,7 @@
},
{
"BriefDescription": "miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -225,6 +249,7 @@
},
{
"BriefDescription": "miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -234,6 +259,7 @@
},
{
"BriefDescription": "Counts all requests miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -243,6 +269,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -252,6 +279,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -261,6 +289,7 @@
},
{
"BriefDescription": "Counts all demand code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -270,6 +299,7 @@
},
{
"BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -279,6 +309,7 @@
},
{
"BriefDescription": "Counts demand data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -288,6 +319,7 @@
},
{
"BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -297,6 +329,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -306,6 +339,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -315,6 +349,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -324,6 +359,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -333,6 +369,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -342,6 +379,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -351,6 +389,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -360,6 +399,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -369,6 +409,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "2",
@@ -377,6 +418,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -385,6 +427,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC2",
"SampleAfterValue": "2000003",
@@ -392,6 +435,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC3",
"SampleAfterValue": "2000003",
@@ -399,6 +443,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "Counter": "0,1,2,3",
"Errata": "HSD65",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC4",
@@ -407,6 +452,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -415,6 +461,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"SampleAfterValue": "2000003",
@@ -422,6 +469,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.START",
"SampleAfterValue": "2000003",
@@ -429,6 +477,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
@@ -436,6 +485,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"SampleAfterValue": "2000003",
@@ -443,6 +493,7 @@
},
{
"BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"SampleAfterValue": "2000003",
@@ -450,6 +501,7 @@
},
{
"BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC4",
"SampleAfterValue": "2000003",
@@ -457,6 +509,7 @@
},
{
"BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC5",
"SampleAfterValue": "2000003",
@@ -464,6 +517,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"SampleAfterValue": "2000003",
@@ -471,6 +525,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"SampleAfterValue": "2000003",
@@ -478,6 +533,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"SampleAfterValue": "2000003",
@@ -485,6 +541,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"SampleAfterValue": "2000003",
@@ -492,6 +549,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"SampleAfterValue": "2000003",
@@ -499,6 +557,7 @@
},
{
"BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"SampleAfterValue": "2000003",
@@ -506,6 +565,7 @@
},
{
"BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/metricgroups.json b/tools/perf/pmu-events/arch/x86/haswell/metricgroups.json
index 8c808347f6da..4193c90c3459 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/other.json b/tools/perf/pmu-events/arch/x86/haswell/other.json
index 2395ebf112db..7d8769ef6d04 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
index 540f4372623c..c00301fdb3d7 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"SampleAfterValue": "200003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -51,6 +58,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -58,6 +66,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -65,6 +74,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"SampleAfterValue": "200003",
@@ -72,6 +82,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -79,6 +90,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -86,6 +98,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -93,6 +106,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -100,6 +114,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "Branch instructions at retirement.",
@@ -107,6 +122,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -115,6 +131,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -124,6 +141,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PublicDescription": "Number of far branches retired.",
@@ -132,6 +150,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -140,6 +159,7 @@
},
{
"BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
"PEBS": "1",
@@ -148,6 +168,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -157,6 +178,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -166,6 +188,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"PublicDescription": "Counts the number of not taken branch instructions retired.",
@@ -174,6 +197,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
@@ -182,6 +206,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -189,6 +214,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -196,6 +222,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -204,6 +231,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -211,6 +239,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -218,6 +247,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -225,6 +255,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -232,6 +263,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"SampleAfterValue": "200003",
@@ -239,6 +271,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "Mispredicted branch instructions at retirement.",
@@ -246,6 +279,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -255,6 +289,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -263,6 +298,7 @@
},
{
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -272,6 +308,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -279,6 +316,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
@@ -288,6 +326,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
@@ -296,6 +335,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -303,6 +343,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
"SampleAfterValue": "2000003",
@@ -310,6 +351,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
@@ -319,6 +361,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
@@ -327,6 +370,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"SampleAfterValue": "2000003",
@@ -335,12 +379,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
@@ -349,12 +395,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -364,6 +412,7 @@
},
{
"BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD78, HSM63, HSM80",
"EventCode": "0xa3",
@@ -374,6 +423,7 @@
},
{
"BriefDescription": "Cycles with pending memory loads.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
@@ -383,6 +433,7 @@
},
{
"BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
@@ -392,6 +443,7 @@
},
{
"BriefDescription": "Execution stalls due to L1 data cache misses",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -401,6 +453,7 @@
},
{
"BriefDescription": "Execution stalls due to L2 cache misses.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"Errata": "HSM63, HSM80",
"EventCode": "0xa3",
@@ -411,6 +464,7 @@
},
{
"BriefDescription": "Execution stalls due to memory subsystem.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
@@ -420,6 +474,7 @@
},
{
"BriefDescription": "Stall cycles because IQ is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"PublicDescription": "Stall cycles due to IQ is full.",
@@ -428,6 +483,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
@@ -436,6 +492,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"Errata": "HSD140, HSD143",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
@@ -444,6 +501,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"Errata": "HSD11, HSD140",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
@@ -452,6 +510,7 @@
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"Errata": "HSD140",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
@@ -462,6 +521,7 @@
},
{
"BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
@@ -470,6 +530,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -480,6 +541,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -489,6 +551,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -497,6 +560,7 @@
},
{
"BriefDescription": "loads blocked by overlapping with store buffer that cannot be forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
@@ -505,6 +569,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline which can have a performance impact.",
@@ -513,6 +578,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PRE.HW_PF",
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
@@ -521,6 +587,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
@@ -529,6 +596,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -537,6 +605,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -545,6 +614,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Number of uops delivered by the LSD.",
@@ -553,6 +623,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -562,6 +633,7 @@
},
{
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "2000003",
@@ -569,6 +641,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"SampleAfterValue": "100003",
@@ -576,6 +649,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -584,6 +658,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"PublicDescription": "Number of integer move elimination candidate uops that were eliminated.",
@@ -592,6 +667,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"PublicDescription": "Number of integer move elimination candidate uops that were not eliminated.",
@@ -600,6 +676,7 @@
},
{
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
@@ -608,6 +685,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"Errata": "HSD135",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
@@ -617,6 +695,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
@@ -624,6 +703,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
@@ -631,6 +711,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "This event counts cycles during which no instructions were allocated because no Store Buffers (SB) were available.",
@@ -639,6 +720,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "Count cases of saving new LBR records by hardware.",
@@ -647,6 +729,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "This event counts cycles when the Reservation Station ( RS ) is empty for the thread. The RS is a structure that buffers allocated micro-ops from the Front-end. If there are many cycles when the RS is empty, it may represent an underflow of instructions delivered from the Front-end.",
@@ -655,6 +738,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -665,6 +749,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"SampleAfterValue": "2000003",
@@ -672,6 +757,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
@@ -679,6 +765,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
@@ -686,6 +773,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
@@ -693,6 +781,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
@@ -700,6 +789,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
@@ -707,6 +797,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"SampleAfterValue": "2000003",
@@ -714,6 +805,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"SampleAfterValue": "2000003",
@@ -721,6 +813,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"Errata": "HSD30, HSM31",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
@@ -730,6 +823,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
@@ -739,6 +833,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
@@ -748,6 +843,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
@@ -757,6 +853,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
@@ -766,6 +863,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
@@ -775,6 +873,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -785,6 +884,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -795,6 +895,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -805,6 +906,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -814,6 +916,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -824,6 +927,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0",
"PublicDescription": "Cycles which a uop is dispatched on port 0 in this thread.",
@@ -833,6 +937,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
@@ -840,6 +945,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1",
"PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
@@ -849,6 +955,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
@@ -856,6 +963,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2",
"PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
@@ -865,6 +973,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -872,6 +981,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3",
"PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
@@ -881,6 +991,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
@@ -888,6 +999,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4",
"PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
@@ -897,6 +1009,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
@@ -904,6 +1017,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5",
"PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
@@ -913,6 +1027,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
@@ -920,6 +1035,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6",
"PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
@@ -929,6 +1045,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
"SampleAfterValue": "2000003",
@@ -936,6 +1053,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7",
"PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
@@ -945,6 +1063,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
"SampleAfterValue": "2000003",
@@ -952,6 +1071,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "This event counts the number of uops issued by the Front-end of the pipeline to the Back-end. This event is counted at the allocation stage and will count both retired and non-retired uops.",
@@ -961,6 +1081,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -970,6 +1091,7 @@
},
{
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.FLAGS_MERGE",
"PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
@@ -978,6 +1100,7 @@
},
{
"BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SINGLE_MUL",
"PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
@@ -986,6 +1109,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (for example, 2 sources + immediate) regardless of whether it is a result of LEA instruction or not.",
@@ -994,6 +1118,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -1003,6 +1128,7 @@
},
{
"BriefDescription": "Actually retired uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -1013,6 +1139,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
@@ -1022,6 +1149,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -1031,6 +1159,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -1040,6 +1169,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json b/tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json
index be9a3ed1a940..fb116637e83e 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_ES",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_I",
"PerPkg": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_M",
"PerPkg": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_MESI",
"PerPkg": "1",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
"PerPkg": "1",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
"PerPkg": "1",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
"PerPkg": "1",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
"PerPkg": "1",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
"PerPkg": "1",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_I",
"PerPkg": "1",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
"PerPkg": "1",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
"PerPkg": "1",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EVICTION",
"PerPkg": "1",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "An external snoop hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EXTERNAL",
"PerPkg": "1",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
"PerPkg": "1",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EVICTION",
"PerPkg": "1",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "An external snoop hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EXTERNAL",
"PerPkg": "1",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
"PerPkg": "1",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
"PerPkg": "1",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "An external snoop misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EXTERNAL",
"PerPkg": "1",
@@ -193,10 +217,19 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
"PerPkg": "1",
"UMask": "0x41",
"Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "cbox_0"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json
index 8da28239ebf9..557b278e631d 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Each cycle count number of valid entries in Coherency Tracker queue from allocation till deallocation. Aperture requests (snoops) appear as NC decoded internally and become coherent (snoop L3, access memory)",
+ "Counter": "0",
"EventCode": "0x83",
"EventName": "UNC_ARB_COH_TRK_OCCUPANCY.All",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Each cycle counts number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json b/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json
index 2af92e43b28a..1ac5b5ef8094 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
index 87a4ec1ee7d7..7cf00ae0e993 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in all TLB levels that cause a page walk of any page size.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
"PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Number of cache load STLB hits. No page walk.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
"PublicDescription": "This event counts load operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
"PublicDescription": "This event counts load operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "2000003",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Completed page walks due to demand load misses that caused 2M/4M page walks in any TLB levels.",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Completed page walks due to demand load misses that caused 4K page walks in any TLB levels.",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
@@ -88,6 +99,7 @@
},
{
"BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
"PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
"PublicDescription": "This event counts store operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
"PublicDescription": "This event counts store operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
@@ -135,6 +152,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 2M/4M page structure.",
@@ -143,6 +161,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 4K page structure.",
@@ -151,6 +170,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
@@ -159,6 +179,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
@@ -166,6 +187,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
@@ -174,6 +196,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in ITLB that causes a page walk of any page size.",
@@ -182,6 +205,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "ITLB misses that hit STLB. No page walk.",
@@ -190,6 +214,7 @@
},
{
"BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_2M",
"PublicDescription": "ITLB misses that hit STLB (2M).",
@@ -198,6 +223,7 @@
},
{
"BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_4K",
"PublicDescription": "ITLB misses that hit STLB (4K).",
@@ -206,6 +232,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Completed page walks in ITLB of any page size.",
@@ -214,6 +241,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
@@ -221,6 +249,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Completed page walks due to misses in ITLB 2M/4M page entries.",
@@ -229,6 +258,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Completed page walks due to misses in ITLB 4K page entries.",
@@ -237,6 +267,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
@@ -245,6 +276,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L1+FB",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L1",
"PublicDescription": "Number of DTLB page walker loads that hit in the L1+FB.",
@@ -253,6 +285,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
@@ -261,6 +294,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "Counter": "0,1,2,3",
"Errata": "HSD25",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L3",
@@ -270,6 +304,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in Memory",
+ "Counter": "0,1,2,3",
"Errata": "HSD25",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
@@ -279,6 +314,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
@@ -286,6 +322,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
@@ -293,6 +330,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L3",
"SampleAfterValue": "2000003",
@@ -300,6 +338,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
@@ -307,6 +346,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
@@ -314,6 +354,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
@@ -321,6 +362,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
@@ -328,6 +370,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in memory.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_ITLB_MEMORY",
"SampleAfterValue": "2000003",
@@ -335,6 +378,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
@@ -343,6 +387,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"PublicDescription": "Number of ITLB page walker loads that hit in the L2.",
@@ -351,6 +396,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "Counter": "0,1,2,3",
"Errata": "HSD25",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L3",
@@ -360,6 +406,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in Memory",
+ "Counter": "0,1,2,3",
"Errata": "HSD25",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
@@ -369,6 +416,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "DTLB flush attempts of the thread-specific entries.",
@@ -377,6 +425,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Count number of STLB flush attempts.",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
index a6c81010b394..42f24cdbe6ae 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L1D miss outstanding duration in cycles",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -34,6 +38,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
"SampleAfterValue": "2000003",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_DEMAND_RQSTS.WB_HIT",
"PublicDescription": "Not rejected writebacks that hit L2 cache.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"PublicDescription": "L2 cache lines in E state filling L2.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"PublicDescription": "L2 cache lines in I state filling L2.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"PublicDescription": "L2 cache lines in S state filling L2.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"PublicDescription": "Clean L2 cache lines evicted by demand.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"PublicDescription": "Dirty L2 cache lines evicted by demand.",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts all L2 code requests.",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "Counts all L2 HW prefetcher requests.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts all L2 store RFO requests.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Number of instruction fetches that missed the L2 cache.",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
@@ -181,6 +203,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
@@ -190,6 +213,7 @@
},
{
"BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_HIT",
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
@@ -198,6 +222,7 @@
},
{
"BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.L2_PF_MISS",
"PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
@@ -206,6 +231,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
@@ -215,6 +241,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
@@ -224,6 +251,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
@@ -232,6 +260,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.ALL_PF",
"PublicDescription": "Any MLC or L3 HW prefetch accessing L2, including rejects.",
@@ -248,6 +278,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"PublicDescription": "Transactions accessing L2 pipe.",
@@ -256,6 +287,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.CODE_RD",
"PublicDescription": "L2 cache accesses when fetching instructions.",
@@ -264,6 +296,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"PublicDescription": "Demand data read requests that access L2 cache.",
@@ -272,6 +305,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.L1D_WB",
"PublicDescription": "L1D writebacks that access L2 cache.",
@@ -280,6 +314,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.L2_FILL",
"PublicDescription": "L2 fill requests that access L2 cache.",
@@ -288,6 +323,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "L2 writebacks that access L2 cache.",
@@ -296,6 +332,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.RFO",
"PublicDescription": "RFO requests that access L2 cache.",
@@ -304,6 +341,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D is locked.",
@@ -312,6 +350,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
@@ -320,6 +359,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
@@ -328,6 +368,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD2",
@@ -338,6 +379,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD2",
@@ -348,6 +390,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD2",
@@ -358,6 +401,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD2",
@@ -368,6 +412,7 @@
},
{
"BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM30",
"EventCode": "0xD3",
@@ -379,6 +424,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD3",
@@ -389,6 +435,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSM30",
"EventCode": "0xD3",
@@ -399,6 +446,7 @@
},
{
"BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSM30",
"EventCode": "0xD3",
@@ -409,6 +457,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSM30",
"EventCode": "0xD1",
@@ -419,6 +468,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD1",
@@ -429,6 +479,7 @@
},
{
"BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSM30",
"EventCode": "0xD1",
@@ -440,6 +491,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD76, HSD29, HSM30",
"EventCode": "0xD1",
@@ -450,6 +502,7 @@
},
{
"BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD1",
@@ -461,6 +514,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD1",
@@ -472,6 +526,7 @@
},
{
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD1",
@@ -483,6 +538,7 @@
},
{
"BriefDescription": "Retired load uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -494,6 +550,7 @@
},
{
"BriefDescription": "Retired store uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -505,6 +562,7 @@
},
{
"BriefDescription": "Retired load uops with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD76, HSD29, HSM30",
"EventCode": "0xD0",
@@ -515,6 +573,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -525,6 +584,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -535,6 +595,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -545,6 +606,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Errata": "HSD29, HSM30",
"EventCode": "0xD0",
@@ -555,6 +617,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
@@ -563,6 +626,7 @@
},
{
"BriefDescription": "Cacheable and noncacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Demand code read requests sent to uncore.",
@@ -571,6 +635,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSM80",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
@@ -580,6 +645,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
@@ -588,6 +654,7 @@
},
{
"BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"SampleAfterValue": "2000003",
@@ -595,6 +662,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
@@ -604,6 +672,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
@@ -613,6 +682,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
"EventCode": "0x60",
@@ -622,6 +692,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
@@ -631,6 +702,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
@@ -640,6 +712,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
@@ -649,6 +722,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
"EventCode": "0x60",
@@ -658,6 +732,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"Errata": "HSD62, HSD61, HSM63",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
@@ -667,6 +742,7 @@
},
{
"BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
@@ -674,6 +750,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -683,6 +760,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -692,6 +770,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -701,6 +780,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -710,6 +790,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -719,6 +800,7 @@
},
{
"BriefDescription": "Counts all requests hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -728,6 +810,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -737,6 +820,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -746,6 +830,7 @@
},
{
"BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -755,6 +840,7 @@
},
{
"BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -764,6 +850,7 @@
},
{
"BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -773,6 +860,7 @@
},
{
"BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -782,6 +870,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -791,6 +880,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -800,6 +890,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -809,6 +900,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -818,6 +910,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -827,6 +920,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -836,6 +930,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -845,6 +940,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -854,6 +950,7 @@
},
{
"BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/counter.json b/tools/perf/pmu-events/arch/x86/haswellx/counter.json
new file mode 100644
index 000000000000..84c01d8023f1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/counter.json
@@ -0,0 +1,57 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "HA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "QPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "R2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "R3QPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "3"
+ },
+ {
+ "Unit": "SBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
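The new counter.json above declares, per PMU unit, how many fixed and generic counters are available, while the "Counter" fields added to the event files list which generic counters each event may use. Below is a minimal, self-contained Python sketch (not the in-tree pmu-events generator) of how the two files could be cross-checked; the paths and helper names are illustrative assumptions only.

#!/usr/bin/env python3
# Sketch: verify that each core event's "Counter" list stays within the
# number of generic counters declared for the "core" unit in counter.json.
import json

def generic_counters(counter_json_path, unit="core"):
    # Return CountersNumGeneric for the requested unit, or None if absent.
    with open(counter_json_path) as f:
        for entry in json.load(f):
            if entry["Unit"].lower() == unit:
                return int(entry["CountersNumGeneric"])
    return None

def check_events(event_json_path, num_generic):
    # Flag events whose numeric counter indices exceed the generic range.
    with open(event_json_path) as f:
        for ev in json.load(f):
            counters = ev.get("Counter", "")
            bad = [c for c in counters.split(",")
                   if c.strip().isdigit() and int(c) >= num_generic]
            if bad:
                print(f'{ev.get("EventName", "?")}: counters {bad} '
                      f'outside 0..{num_generic - 1}')

if __name__ == "__main__":
    base = "tools/perf/pmu-events/arch/x86/haswellx/"  # illustrative path
    n = generic_counters(base + "counter.json")
    if n is not None:
        check_events(base + "cache.json", n)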
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
index 8fcc10f74ad9..a0b917306887 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "AVX_INSTS.ALL",
"PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"PublicDescription": "Number of SIMD FP assists due to input values.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"PublicDescription": "Number of SIMD FP assists due to output values.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "Number of X87 FP assists due to input values.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"PublicDescription": "Number of X87 FP assists due to output values.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"PublicDescription": "Number of SIMD move elimination candidate uops that were eliminated.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"PublicDescription": "Number of SIMD move elimination candidate uops that were not eliminated.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "HSD56, HSM57",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"Errata": "HSD56, HSM57",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
index 73d6d681dfa7..a9f81fd17925 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFDATA_STALL",
"SampleAfterValue": "2000003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFETCH_STALL",
"SampleAfterValue": "2000003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts Instruction Cache (ICACHE) misses.",
@@ -45,6 +51,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -54,6 +61,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -63,6 +71,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
"Errata": "HSD135",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"PublicDescription": "Number of uops delivered to IDQ from any path.",
@@ -114,6 +128,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
@@ -130,6 +146,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -181,6 +203,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
@@ -189,6 +212,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"Errata": "HSD135",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
@@ -198,6 +222,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"Errata": "HSD135",
"EventCode": "0x9C",
@@ -208,6 +233,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD135",
"EventCode": "0x9C",
@@ -218,6 +244,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"Errata": "HSD135",
"EventCode": "0x9C",
@@ -227,6 +254,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"Errata": "HSD135",
"EventCode": "0x9C",
@@ -236,6 +264,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD135",
"EventCode": "0x9C",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
index 83d50d80a148..8f2ba3391e35 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -68,7 +68,7 @@
},
{
"BriefDescription": "Percentage of time spent in the active CPU power state C0",
- "MetricExpr": "tma_info_system_cpu_utilization",
+ "MetricExpr": "tma_info_system_cpus_utilized",
"MetricName": "cpu_utilization",
"ScaleUnit": "100%"
},
@@ -292,7 +292,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -302,7 +302,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -323,7 +323,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -353,7 +353,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -374,7 +374,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -383,7 +383,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "10 * ARITH.DIVIDER_UOPS / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
@@ -420,7 +420,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -429,7 +429,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
@@ -438,7 +438,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -448,7 +448,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.REQUEST_FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -477,7 +477,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -497,7 +497,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
@@ -590,12 +590,12 @@
"MetricThreshold": "tma_info_inst_mix_ipstore < 8"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -616,58 +616,40 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "Average Parallel L2 cache miss data reads",
- "MetricExpr": "tma_info_memory_latency_data_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_data_l2_mlp"
- },
- {
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_l1mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l2_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"MetricGroup": "Backend;CacheHits;Mem",
"MetricName": "tma_info_memory_l2mpki"
},
{
- "BriefDescription": "",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l3_cache_fill_bw"
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l3_cache_fill_bw_2t"
+ "MetricName": "tma_info_memory_l3_cache_fill_bw"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
@@ -683,27 +665,15 @@
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_miss_latency",
- "MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
- },
- {
- "BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_latency_load_l2_mlp"
- },
- {
- "BriefDescription": "Average Latency for L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_load_l2_miss_latency"
+ "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_load_l2_mlp"
+ "MetricName": "tma_info_memory_latency_load_l2_mlp"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
@@ -722,12 +692,6 @@
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "tma_info_memory_tlb_page_walks_utilization",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_page_walks_utilization"
- },
- {
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricExpr": "(ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_core_core_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_page_walks_utilization",
@@ -747,13 +711,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -854,7 +818,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -863,7 +827,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -881,7 +845,7 @@
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
@@ -901,7 +865,7 @@
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
@@ -959,7 +923,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -969,7 +933,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -978,7 +942,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
@@ -1134,7 +1098,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -1161,7 +1125,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -1190,7 +1154,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1218,7 +1182,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
index 2d212cf59e92..be0108558103 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
"SampleAfterValue": "2000003",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
"SampleAfterValue": "2000003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
"SampleAfterValue": "2000003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "Counter": "0,1,2,3",
"Errata": "HSD65",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC4",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts).",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Number of times an HLE execution successfully committed.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.COMMIT",
"SampleAfterValue": "2000003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.START",
"SampleAfterValue": "2000003",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of memory ordering machine clears detected. Memory ordering machine clears can result from memory address aliasing or snoops from another hardware thread or core to data inflight in the pipeline. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -68,6 +77,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 128.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 16.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -92,6 +103,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 256.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -104,6 +116,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 32.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 4.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -128,6 +142,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 512.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -140,6 +155,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 64.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -152,6 +168,7 @@
},
{
"BriefDescription": "Randomly selected loads with latency value being above 8.",
+ "Counter": "3",
"Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
"EventCode": "0xcd",
@@ -164,6 +181,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
@@ -172,6 +190,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"PublicDescription": "Speculative cache-line split store-address uops dispatched to L1D.",
@@ -180,6 +199,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -189,6 +209,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -198,6 +219,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -207,6 +229,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -216,6 +239,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -225,6 +249,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -234,6 +259,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -243,6 +269,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -252,6 +279,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -261,6 +289,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -270,6 +299,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -279,6 +309,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -288,6 +319,7 @@
},
{
"BriefDescription": "Counts all requests miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -297,6 +329,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -306,6 +339,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -315,6 +349,7 @@
},
{
"BriefDescription": "Counts all demand code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -324,6 +359,7 @@
},
{
"BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -333,6 +369,7 @@
},
{
"BriefDescription": "Counts demand data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -342,6 +379,7 @@
},
{
"BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -351,6 +389,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -360,6 +399,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -369,6 +409,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -378,6 +419,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -387,6 +429,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -396,6 +439,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -405,6 +449,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -414,6 +459,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -423,6 +469,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -432,6 +479,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "1",
@@ -440,6 +488,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -448,6 +497,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC2",
"SampleAfterValue": "2000003",
@@ -455,6 +505,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC3",
"SampleAfterValue": "2000003",
@@ -462,6 +513,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "Counter": "0,1,2,3",
"Errata": "HSD65",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC4",
@@ -470,6 +522,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -478,6 +531,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"SampleAfterValue": "2000003",
@@ -485,6 +539,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.START",
"SampleAfterValue": "2000003",
@@ -492,6 +547,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
@@ -499,6 +555,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"SampleAfterValue": "2000003",
@@ -506,6 +563,7 @@
},
{
"BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"SampleAfterValue": "2000003",
@@ -513,6 +571,7 @@
},
{
"BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC4",
"SampleAfterValue": "2000003",
@@ -520,6 +579,7 @@
},
{
"BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC5",
"SampleAfterValue": "2000003",
@@ -527,6 +587,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"SampleAfterValue": "2000003",
@@ -534,6 +595,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"SampleAfterValue": "2000003",
@@ -541,6 +603,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"SampleAfterValue": "2000003",
@@ -548,6 +611,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"SampleAfterValue": "2000003",
@@ -555,6 +619,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"SampleAfterValue": "2000003",
@@ -562,6 +627,7 @@
},
{
"BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"SampleAfterValue": "2000003",
@@ -569,6 +635,7 @@
},
{
"BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json b/tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json
index 8c808347f6da..4193c90c3459 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/other.json b/tools/perf/pmu-events/arch/x86/haswellx/other.json
index 2395ebf112db..7d8769ef6d04 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
index 540f4372623c..c00301fdb3d7 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"SampleAfterValue": "200003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -51,6 +58,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -58,6 +66,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -65,6 +74,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"SampleAfterValue": "200003",
@@ -72,6 +82,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -79,6 +90,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -86,6 +98,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -93,6 +106,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -100,6 +114,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "Branch instructions at retirement.",
@@ -107,6 +122,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -115,6 +131,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -124,6 +141,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PublicDescription": "Number of far branches retired.",
@@ -132,6 +150,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -140,6 +159,7 @@
},
{
"BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
"PEBS": "1",
@@ -148,6 +168,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -157,6 +178,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -166,6 +188,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"PublicDescription": "Counts the number of not taken branch instructions retired.",
@@ -174,6 +197,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
@@ -182,6 +206,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -189,6 +214,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -196,6 +222,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -204,6 +231,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -211,6 +239,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -218,6 +247,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -225,6 +255,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -232,6 +263,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"SampleAfterValue": "200003",
@@ -239,6 +271,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "Mispredicted branch instructions at retirement.",
@@ -246,6 +279,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -255,6 +289,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -263,6 +298,7 @@
},
{
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -272,6 +308,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -279,6 +316,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
@@ -288,6 +326,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
@@ -296,6 +335,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "100003",
@@ -303,6 +343,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
"SampleAfterValue": "2000003",
@@ -310,6 +351,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
@@ -319,6 +361,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
@@ -327,6 +370,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"SampleAfterValue": "2000003",
@@ -335,12 +379,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
@@ -349,12 +395,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -364,6 +412,7 @@
},
{
"BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD78, HSM63, HSM80",
"EventCode": "0xa3",
@@ -374,6 +423,7 @@
},
{
"BriefDescription": "Cycles with pending memory loads.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
@@ -383,6 +433,7 @@
},
{
"BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
@@ -392,6 +443,7 @@
},
{
"BriefDescription": "Execution stalls due to L1 data cache misses",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -401,6 +453,7 @@
},
{
"BriefDescription": "Execution stalls due to L2 cache misses.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"Errata": "HSM63, HSM80",
"EventCode": "0xa3",
@@ -411,6 +464,7 @@
},
{
"BriefDescription": "Execution stalls due to memory subsystem.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
@@ -420,6 +474,7 @@
},
{
"BriefDescription": "Stall cycles because IQ is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"PublicDescription": "Stall cycles due to IQ is full.",
@@ -428,6 +483,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
@@ -436,6 +492,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"Errata": "HSD140, HSD143",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
@@ -444,6 +501,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"Errata": "HSD11, HSD140",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
@@ -452,6 +510,7 @@
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"Errata": "HSD140",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
@@ -462,6 +521,7 @@
},
{
"BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
@@ -470,6 +530,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -480,6 +541,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -489,6 +551,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -497,6 +560,7 @@
},
{
"BriefDescription": "loads blocked by overlapping with store buffer that cannot be forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
@@ -505,6 +569,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline which can have a performance impact.",
@@ -513,6 +578,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PRE.HW_PF",
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
@@ -521,6 +587,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
@@ -529,6 +596,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -537,6 +605,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -545,6 +614,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Number of uops delivered by the LSD.",
@@ -553,6 +623,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -562,6 +633,7 @@
},
{
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "2000003",
@@ -569,6 +641,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"SampleAfterValue": "100003",
@@ -576,6 +649,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -584,6 +658,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"PublicDescription": "Number of integer move elimination candidate uops that were eliminated.",
@@ -592,6 +667,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"PublicDescription": "Number of integer move elimination candidate uops that were not eliminated.",
@@ -600,6 +676,7 @@
},
{
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
@@ -608,6 +685,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"Errata": "HSD135",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
@@ -617,6 +695,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
@@ -624,6 +703,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
@@ -631,6 +711,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "This event counts cycles during which no instructions were allocated because no Store Buffers (SB) were available.",
@@ -639,6 +720,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "Count cases of saving new LBR records by hardware.",
@@ -647,6 +729,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "This event counts cycles when the Reservation Station ( RS ) is empty for the thread. The RS is a structure that buffers allocated micro-ops from the Front-end. If there are many cycles when the RS is empty, it may represent an underflow of instructions delivered from the Front-end.",
@@ -655,6 +738,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -665,6 +749,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"SampleAfterValue": "2000003",
@@ -672,6 +757,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
@@ -679,6 +765,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
@@ -686,6 +773,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
@@ -693,6 +781,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
@@ -700,6 +789,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
@@ -707,6 +797,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"SampleAfterValue": "2000003",
@@ -714,6 +805,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"SampleAfterValue": "2000003",
@@ -721,6 +813,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"Errata": "HSD30, HSM31",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
@@ -730,6 +823,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
@@ -739,6 +833,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
@@ -748,6 +843,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
@@ -757,6 +853,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
@@ -766,6 +863,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"Errata": "HSD30, HSM31",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
@@ -775,6 +873,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -785,6 +884,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -795,6 +895,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -805,6 +906,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -814,6 +916,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"Errata": "HSD144, HSD30, HSM31",
"EventCode": "0xB1",
@@ -824,6 +927,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0",
"PublicDescription": "Cycles which a uop is dispatched on port 0 in this thread.",
@@ -833,6 +937,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
@@ -840,6 +945,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1",
"PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
@@ -849,6 +955,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
@@ -856,6 +963,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2",
"PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
@@ -865,6 +973,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -872,6 +981,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3",
"PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
@@ -881,6 +991,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
@@ -888,6 +999,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4",
"PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
@@ -897,6 +1009,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
@@ -904,6 +1017,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5",
"PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
@@ -913,6 +1027,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
@@ -920,6 +1035,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6",
"PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
@@ -929,6 +1045,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
"SampleAfterValue": "2000003",
@@ -936,6 +1053,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7",
"PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
@@ -945,6 +1063,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
"SampleAfterValue": "2000003",
@@ -952,6 +1071,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "This event counts the number of uops issued by the Front-end of the pipeline to the Back-end. This event is counted at the allocation stage and will count both retired and non-retired uops.",
@@ -961,6 +1081,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -970,6 +1091,7 @@
},
{
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.FLAGS_MERGE",
"PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
@@ -978,6 +1100,7 @@
},
{
"BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SINGLE_MUL",
"PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
@@ -986,6 +1109,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (for example, 2 sources + immediate) regardless of whether it is a result of LEA instruction or not.",
@@ -994,6 +1118,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -1003,6 +1128,7 @@
},
{
"BriefDescription": "Actually retired uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -1013,6 +1139,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
@@ -1022,6 +1149,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -1031,6 +1159,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -1040,6 +1169,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
index 9227cc226002..3c23bafcba28 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
"Filter": "filter_opc=0x191",
@@ -12,6 +13,7 @@
},
{
"BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
"Filter": "filter_opc=0x192",
@@ -23,6 +25,7 @@
},
{
"BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.DATA_READ",
"Filter": "filter_opc=0x182",
@@ -34,6 +37,7 @@
},
{
"BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_READ",
"Filter": "filter_opc=0x187,filter_nc=1",
@@ -45,6 +49,7 @@
},
{
"BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_WRITE",
"Filter": "filter_opc=0x18f,filter_nc=1",
@@ -56,6 +61,7 @@
},
{
"BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
"Filter": "filter_opc=0x1c8,filter_tid=0x3e",
@@ -67,6 +73,7 @@
},
{
"BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.PCIE_READ",
"Filter": "filter_opc=0x19e",
@@ -78,6 +85,7 @@
},
{
"BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.PCIE_WRITE",
"Filter": "filter_opc=0x1c8",
@@ -89,6 +97,7 @@
},
{
"BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
"Filter": "filter_opc=0x190",
@@ -100,6 +109,7 @@
},
{
"BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.UNCACHEABLE",
"Filter": "filter_opc=0x187",
@@ -111,6 +121,7 @@
},
{
"BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
"Filter": "filter_opc=0x181",
@@ -122,6 +133,7 @@
},
{
"BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
"Filter": "filter_opc=0x180,filter_tid=0x3e",
@@ -132,6 +144,7 @@
},
{
"BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.PCIE_READ",
"Filter": "filter_opc=0x19e",
@@ -143,6 +156,7 @@
},
{
"BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.PCIE_WRITE",
"Filter": "filter_opc=0x1c8,filter_tid=0x3e",
@@ -154,6 +168,7 @@
},
{
"BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_FULL",
"Filter": "filter_opc=0x18c",
@@ -165,6 +180,7 @@
},
{
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
"Filter": "filter_opc=0x18d",
@@ -176,6 +192,7 @@
},
{
"BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_C_BOUNCE_CONTROL",
"PerPkg": "1",
@@ -183,12 +200,14 @@
},
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBOX"
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_C_COUNTER0_OCCUPANCY",
"PerPkg": "1",
@@ -197,6 +216,7 @@
},
{
"BriefDescription": "FaST wire asserted",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_C_FAST_ASSERTED",
"PerPkg": "1",
@@ -205,6 +225,7 @@
},
{
"BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
"Filter": "filter_state=0x1",
@@ -216,6 +237,7 @@
},
{
"BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
@@ -225,6 +247,7 @@
},
{
"BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.NID",
"PerPkg": "1",
@@ -234,6 +257,7 @@
},
{
"BriefDescription": "Cache Lookups; Any Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.READ",
"PerPkg": "1",
@@ -243,6 +267,7 @@
},
{
"BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
"PerPkg": "1",
@@ -252,6 +277,7 @@
},
{
"BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.WRITE",
"PerPkg": "1",
@@ -261,6 +287,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.E_STATE",
"PerPkg": "1",
@@ -270,6 +297,7 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.F_STATE",
"PerPkg": "1",
@@ -279,6 +307,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.I_STATE",
"PerPkg": "1",
@@ -288,6 +317,7 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.MISS",
"PerPkg": "1",
@@ -297,6 +327,7 @@
},
{
"BriefDescription": "M line evictions from LLC (writebacks to memory)",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
@@ -307,6 +338,7 @@
},
{
"BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.NID",
"PerPkg": "1",
@@ -316,6 +348,7 @@
},
{
"BriefDescription": "Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.S_STATE",
"PerPkg": "1",
@@ -325,6 +358,7 @@
},
{
"BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
"PerPkg": "1",
@@ -334,6 +368,7 @@
},
{
"BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
"PerPkg": "1",
@@ -343,6 +378,7 @@
},
{
"BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -352,6 +388,7 @@
},
{
"BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RSPI_WAS_FSE",
"PerPkg": "1",
@@ -361,6 +398,7 @@
},
{
"BriefDescription": "Cbo Misc",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.STARTED",
"PerPkg": "1",
@@ -370,6 +408,7 @@
},
{
"BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.WC_ALIASING",
"PerPkg": "1",
@@ -379,6 +418,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE0",
"PerPkg": "1",
@@ -388,6 +428,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE1",
"PerPkg": "1",
@@ -397,6 +438,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE2",
"PerPkg": "1",
@@ -406,6 +448,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.AGE3",
"PerPkg": "1",
@@ -415,6 +458,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.LRU_DECREMENT",
"PerPkg": "1",
@@ -424,6 +468,7 @@
},
{
"BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
"PerPkg": "1",
@@ -433,6 +478,7 @@
},
{
"BriefDescription": "AD Ring In Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.ALL",
"PerPkg": "1",
@@ -442,6 +488,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN",
"PerPkg": "1",
@@ -451,6 +498,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -460,6 +508,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
"PerPkg": "1",
@@ -469,6 +518,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP",
"PerPkg": "1",
@@ -478,6 +528,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP_EVEN",
"PerPkg": "1",
@@ -487,6 +538,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP_ODD",
"PerPkg": "1",
@@ -496,6 +548,7 @@
},
{
"BriefDescription": "AK Ring In Use; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.ALL",
"PerPkg": "1",
@@ -505,6 +558,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN",
"PerPkg": "1",
@@ -514,6 +568,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -523,6 +578,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
"PerPkg": "1",
@@ -532,6 +588,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP",
"PerPkg": "1",
@@ -541,6 +598,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP_EVEN",
"PerPkg": "1",
@@ -550,6 +608,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP_ODD",
"PerPkg": "1",
@@ -559,6 +618,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.ALL",
"PerPkg": "1",
@@ -568,6 +628,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN",
"PerPkg": "1",
@@ -577,6 +638,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -586,6 +648,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
"PerPkg": "1",
@@ -595,6 +658,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP",
"PerPkg": "1",
@@ -604,6 +668,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP_EVEN",
"PerPkg": "1",
@@ -613,6 +678,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP_ODD",
"PerPkg": "1",
@@ -622,6 +688,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AD",
"PerPkg": "1",
@@ -630,6 +697,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AK",
"PerPkg": "1",
@@ -638,6 +706,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.BL",
"PerPkg": "1",
@@ -646,6 +715,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.IV",
"PerPkg": "1",
@@ -654,6 +724,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -663,6 +734,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.DN",
"PerPkg": "1",
@@ -672,6 +744,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.DOWN",
"PerPkg": "1",
@@ -681,6 +754,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_C_RING_IV_USED.UP",
"PerPkg": "1",
@@ -690,6 +764,7 @@
},
{
"BriefDescription": "UNC_C_RING_SINK_STARVED.AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AD",
"PerPkg": "1",
@@ -698,6 +773,7 @@
},
{
"BriefDescription": "UNC_C_RING_SINK_STARVED.AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AK",
"PerPkg": "1",
@@ -706,6 +782,7 @@
},
{
"BriefDescription": "UNC_C_RING_SINK_STARVED.BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.BL",
"PerPkg": "1",
@@ -714,6 +791,7 @@
},
{
"BriefDescription": "UNC_C_RING_SINK_STARVED.IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.IV",
"PerPkg": "1",
@@ -722,6 +800,7 @@
},
{
"BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_C_RING_SRC_THRTL",
"PerPkg": "1",
@@ -729,6 +808,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
"PerPkg": "1",
@@ -738,6 +818,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
"PerPkg": "1",
@@ -747,6 +828,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
"PerPkg": "1",
@@ -756,6 +838,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
"PerPkg": "1",
@@ -765,6 +848,7 @@
},
{
"BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IPQ",
"PerPkg": "1",
@@ -774,6 +858,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ",
"PerPkg": "1",
@@ -783,6 +868,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
"PerPkg": "1",
@@ -792,6 +878,7 @@
},
{
"BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.PRQ",
"PerPkg": "1",
@@ -801,6 +888,7 @@
},
{
"BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
"PerPkg": "1",
@@ -810,6 +898,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IPQ",
"PerPkg": "1",
@@ -819,6 +908,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IRQ",
"PerPkg": "1",
@@ -828,6 +918,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
"PerPkg": "1",
@@ -837,6 +928,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.PRQ",
"PerPkg": "1",
@@ -846,6 +938,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -855,6 +948,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
"PerPkg": "1",
@@ -864,6 +958,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
"PerPkg": "1",
@@ -873,6 +968,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -882,6 +978,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -891,6 +988,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -900,6 +998,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -909,6 +1008,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
"PerPkg": "1",
@@ -918,6 +1018,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
"PerPkg": "1",
@@ -927,6 +1028,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -936,6 +1038,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.NID",
"PerPkg": "1",
@@ -945,6 +1048,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -954,6 +1058,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
"PerPkg": "1",
@@ -963,6 +1068,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -972,6 +1078,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
"PerPkg": "1",
@@ -981,6 +1088,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -990,6 +1098,7 @@
},
{
"BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
"PerPkg": "1",
@@ -999,6 +1108,7 @@
},
{
"BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
"PerPkg": "1",
@@ -1008,6 +1118,7 @@
},
{
"BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -1017,6 +1128,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
"PerPkg": "1",
@@ -1026,6 +1138,7 @@
},
{
"BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -1035,6 +1148,7 @@
},
{
"BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
"PerPkg": "1",
@@ -1044,6 +1158,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
"PerPkg": "1",
@@ -1053,6 +1168,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
"PerPkg": "1",
@@ -1062,6 +1178,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
"PerPkg": "1",
@@ -1071,6 +1188,7 @@
},
{
"BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
"PerPkg": "1",
@@ -1080,6 +1198,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
"PerPkg": "1",
@@ -1089,6 +1208,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -1098,6 +1218,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
"PerPkg": "1",
@@ -1107,6 +1228,7 @@
},
{
"BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
"PerPkg": "1",
@@ -1116,6 +1238,7 @@
},
{
"BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -1125,6 +1248,7 @@
},
{
"BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -1134,6 +1258,7 @@
},
{
"BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x3E",
"EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -1143,6 +1268,7 @@
},
{
"BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x3E",
"EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -1152,6 +1278,7 @@
},
{
"BriefDescription": "TOR Inserts; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
@@ -1161,6 +1288,7 @@
},
{
"BriefDescription": "TOR Inserts; Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
@@ -1170,6 +1298,7 @@
},
{
"BriefDescription": "TOR Inserts; Local Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOCAL",
"PerPkg": "1",
@@ -1179,6 +1308,7 @@
},
{
"BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
"PerPkg": "1",
@@ -1188,6 +1318,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Local Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
"PerPkg": "1",
@@ -1197,6 +1328,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
"PerPkg": "1",
@@ -1206,6 +1338,7 @@
},
{
"BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
@@ -1215,6 +1348,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Remote Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
"PerPkg": "1",
@@ -1224,6 +1358,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
"PerPkg": "1",
@@ -1233,6 +1368,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
@@ -1242,6 +1378,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
@@ -1251,6 +1388,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Miss All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
@@ -1260,6 +1398,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -1269,6 +1408,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
@@ -1278,6 +1418,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
@@ -1287,6 +1428,7 @@
},
{
"BriefDescription": "TOR Inserts; Opcode Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
@@ -1296,6 +1438,7 @@
},
{
"BriefDescription": "TOR Inserts; Remote Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REMOTE",
"PerPkg": "1",
@@ -1305,6 +1448,7 @@
},
{
"BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
"PerPkg": "1",
@@ -1314,6 +1458,7 @@
},
{
"BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WB",
"PerPkg": "1",
@@ -1323,6 +1468,7 @@
},
{
"BriefDescription": "TOR Occupancy; Any",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -1332,6 +1478,7 @@
},
{
"BriefDescription": "TOR Occupancy; Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
"PerPkg": "1",
@@ -1341,6 +1488,7 @@
},
{
"BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
"Filter": "filter_opc=0x182",
@@ -1351,6 +1499,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -1360,6 +1509,7 @@
},
{
"BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
"PerPkg": "1",
@@ -1369,6 +1519,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss All",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
"PerPkg": "1",
@@ -1378,6 +1529,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
"PerPkg": "1",
@@ -1387,6 +1539,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
"PerPkg": "1",
@@ -1396,6 +1549,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
@@ -1405,6 +1559,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
"PerPkg": "1",
@@ -1414,6 +1569,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
"PerPkg": "1",
@@ -1423,6 +1579,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
"PerPkg": "1",
@@ -1432,6 +1589,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
"PerPkg": "1",
@@ -1441,6 +1599,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
"PerPkg": "1",
@@ -1450,6 +1609,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -1459,6 +1619,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
"PerPkg": "1",
@@ -1468,6 +1629,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
"PerPkg": "1",
@@ -1477,6 +1639,7 @@
},
{
"BriefDescription": "TOR Occupancy; Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
"PerPkg": "1",
@@ -1486,6 +1649,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -1495,6 +1659,7 @@
},
{
"BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
"PerPkg": "1",
@@ -1504,6 +1669,7 @@
},
{
"BriefDescription": "TOR Occupancy; Writebacks",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.WB",
"PerPkg": "1",
@@ -1513,6 +1679,7 @@
},
{
"BriefDescription": "Onto AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.AD",
"PerPkg": "1",
@@ -1521,6 +1688,7 @@
},
{
"BriefDescription": "Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.AK",
"PerPkg": "1",
@@ -1529,6 +1697,7 @@
},
{
"BriefDescription": "Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.BL",
"PerPkg": "1",
@@ -1537,6 +1706,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
"PerPkg": "1",
@@ -1546,6 +1716,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CORE",
"PerPkg": "1",
@@ -1555,6 +1726,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
"PerPkg": "1",
@@ -1564,6 +1736,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CORE",
"PerPkg": "1",
@@ -1573,6 +1746,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
"PerPkg": "1",
@@ -1582,6 +1756,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CORE",
"PerPkg": "1",
@@ -1591,6 +1766,7 @@
},
{
"BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
"PerPkg": "1",
@@ -1600,6 +1776,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AD_CORE",
"PerPkg": "1",
@@ -1609,6 +1786,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AK_BOTH",
"PerPkg": "1",
@@ -1618,6 +1796,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.BL_BOTH",
"PerPkg": "1",
@@ -1627,6 +1806,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.IV",
"PerPkg": "1",
@@ -1636,6 +1816,7 @@
},
{
"BriefDescription": "BT Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_H_BT_CYCLES_NE",
"PerPkg": "1",
@@ -1644,6 +1825,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
"PerPkg": "1",
@@ -1653,6 +1835,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
"PerPkg": "1",
@@ -1662,6 +1845,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
"PerPkg": "1",
@@ -1671,6 +1855,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
"PerPkg": "1",
@@ -1680,6 +1865,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
"PerPkg": "1",
@@ -1689,6 +1875,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.TAKEN",
"PerPkg": "1",
@@ -1698,6 +1885,7 @@
},
{
"BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_H_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
@@ -1705,6 +1893,7 @@
},
{
"BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_H_DIRECT2CORE_COUNT",
"PerPkg": "1",
@@ -1713,6 +1902,7 @@
},
{
"BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
"PerPkg": "1",
@@ -1721,6 +1911,7 @@
},
{
"BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
"PerPkg": "1",
@@ -1729,6 +1920,7 @@
},
{
"BriefDescription": "Directory Lat Opt Return",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_H_DIRECTORY_LAT_OPT",
"PerPkg": "1",
@@ -1737,6 +1929,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
"PerPkg": "1",
@@ -1746,6 +1939,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
"PerPkg": "1",
@@ -1755,6 +1949,7 @@
},
{
"BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -1764,6 +1959,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
"PerPkg": "1",
@@ -1773,6 +1969,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.SET",
"PerPkg": "1",
@@ -1782,6 +1979,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1790,6 +1988,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ALL",
"PerPkg": "1",
@@ -1798,6 +1997,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.ALLOCS",
"PerPkg": "1",
@@ -1806,6 +2006,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.EVICTS",
"PerPkg": "1",
@@ -1814,6 +2015,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.HOM",
"PerPkg": "1",
@@ -1822,6 +2024,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.INVALS",
"PerPkg": "1",
@@ -1830,6 +2033,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
"PerPkg": "1",
@@ -1838,6 +2042,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSP",
"PerPkg": "1",
@@ -1846,6 +2051,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -1854,6 +2060,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -1862,6 +2069,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.RSPFWDS",
"PerPkg": "1",
@@ -1870,6 +2078,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
"PerPkg": "1",
@@ -1878,6 +2087,7 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_H_HITME_HIT.WBMTOI",
"PerPkg": "1",
@@ -1886,6 +2096,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1894,6 +2105,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
"PerPkg": "1",
@@ -1902,6 +2114,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
"PerPkg": "1",
@@ -1910,6 +2123,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
"PerPkg": "1",
@@ -1918,6 +2132,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
"PerPkg": "1",
@@ -1926,6 +2141,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -1934,6 +2150,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -1942,6 +2159,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
"PerPkg": "1",
@@ -1950,6 +2168,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
"PerPkg": "1",
@@ -1958,6 +2177,7 @@
},
{
"BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
"PerPkg": "1",
@@ -1966,6 +2186,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
"PerPkg": "1",
@@ -1974,6 +2195,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ALL",
"PerPkg": "1",
@@ -1982,6 +2204,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
"PerPkg": "1",
@@ -1990,6 +2213,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.HOM",
"PerPkg": "1",
@@ -1998,6 +2222,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.INVALS",
"PerPkg": "1",
@@ -2006,6 +2231,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
"PerPkg": "1",
@@ -2014,6 +2240,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSP",
"PerPkg": "1",
@@ -2022,6 +2249,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
"PerPkg": "1",
@@ -2030,6 +2258,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
"PerPkg": "1",
@@ -2038,6 +2267,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
"PerPkg": "1",
@@ -2046,6 +2276,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
"PerPkg": "1",
@@ -2054,6 +2285,7 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
"PerPkg": "1",
@@ -2062,6 +2294,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
"PerPkg": "1",
@@ -2071,6 +2304,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
"PerPkg": "1",
@@ -2080,6 +2314,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
"PerPkg": "1",
@@ -2089,6 +2324,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
"PerPkg": "1",
@@ -2098,6 +2334,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
"PerPkg": "1",
@@ -2107,6 +2344,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
"PerPkg": "1",
@@ -2116,6 +2354,7 @@
},
{
"BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_H_IMC_READS.NORMAL",
"PerPkg": "1",
@@ -2125,6 +2364,7 @@
},
{
"BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_H_IMC_RETRY",
"PerPkg": "1",
@@ -2132,6 +2372,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -2141,6 +2382,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.FULL",
"PerPkg": "1",
@@ -2150,6 +2392,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
"PerPkg": "1",
@@ -2159,6 +2402,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -2168,6 +2412,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
"PerPkg": "1",
@@ -2177,6 +2422,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
"PerPkg": "1",
@@ -2185,6 +2431,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
"PerPkg": "1",
@@ -2193,6 +2440,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x64",
"EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
"PerPkg": "1",
@@ -2202,6 +2450,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x64",
"EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
"PerPkg": "1",
@@ -2211,6 +2460,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0x65",
"EventName": "UNC_H_IOT_CTS_HI.CTS2",
"PerPkg": "1",
@@ -2220,6 +2470,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0x65",
"EventName": "UNC_H_IOT_CTS_HI.CTS3",
"PerPkg": "1",
@@ -2229,6 +2480,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
"PerPkg": "1",
@@ -2238,6 +2490,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
"PerPkg": "1",
@@ -2247,6 +2500,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.CANCELLED",
"PerPkg": "1",
@@ -2256,6 +2510,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2265,6 +2520,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.READS_LOCAL",
"PerPkg": "1",
@@ -2274,6 +2530,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
"PerPkg": "1",
@@ -2283,6 +2540,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.REMOTE",
"PerPkg": "1",
@@ -2292,6 +2550,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.REMOTE_USEFUL",
"PerPkg": "1",
@@ -2301,6 +2560,7 @@
},
{
"BriefDescription": "OSB Early Data Return; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.ALL",
"PerPkg": "1",
@@ -2310,6 +2570,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
"PerPkg": "1",
@@ -2319,6 +2580,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
"PerPkg": "1",
@@ -2328,6 +2590,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
"PerPkg": "1",
@@ -2337,6 +2600,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
"PerPkg": "1",
@@ -2346,6 +2610,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2355,6 +2620,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -2364,6 +2630,7 @@
},
{
"BriefDescription": "Read and Write Requests; Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
@@ -2373,6 +2640,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -2382,6 +2650,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -2391,6 +2660,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
@@ -2400,6 +2670,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -2409,6 +2680,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -2418,6 +2690,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -2427,6 +2700,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2436,6 +2710,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -2445,6 +2720,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW",
"PerPkg": "1",
@@ -2454,6 +2730,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -2463,6 +2740,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -2472,6 +2750,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -2481,6 +2760,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2490,6 +2770,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -2499,6 +2780,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW",
"PerPkg": "1",
@@ -2508,6 +2790,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -2517,6 +2800,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -2526,6 +2810,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -2535,6 +2820,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2544,6 +2830,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -2553,6 +2840,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW",
"PerPkg": "1",
@@ -2562,6 +2850,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -2571,6 +2860,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -2580,6 +2870,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -2589,6 +2880,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -2598,6 +2890,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -2607,6 +2900,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -2616,6 +2910,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -2625,6 +2920,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -2634,6 +2930,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -2643,6 +2940,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
@@ -2652,6 +2950,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2661,6 +2960,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2670,6 +2970,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2679,6 +2980,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2688,6 +2990,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2697,6 +3000,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2706,6 +3010,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2715,6 +3020,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2724,6 +3030,7 @@
},
{
"BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
"PerPkg": "1",
@@ -2733,6 +3040,7 @@
},
{
"BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
"PerPkg": "1",
@@ -2742,6 +3050,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -2751,6 +3060,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
"PerPkg": "1",
@@ -2760,6 +3070,7 @@
},
{
"BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
"PerPkg": "1",
@@ -2769,6 +3080,7 @@
},
{
"BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -2778,6 +3090,7 @@
},
{
"BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -2787,6 +3100,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
"PerPkg": "1",
@@ -2796,6 +3110,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
@@ -2805,6 +3120,7 @@
},
{
"BriefDescription": "M line forwarded from remote cache with no writeback to memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
@@ -2815,6 +3131,7 @@
},
{
"BriefDescription": "Shared line response from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
@@ -2825,6 +3142,7 @@
},
{
"BriefDescription": "Shared line forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
@@ -2835,6 +3153,7 @@
},
{
"BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
@@ -2845,6 +3164,7 @@
},
{
"BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSP_WB",
"PerPkg": "1",
@@ -2854,6 +3174,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Other",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
"PerPkg": "1",
@@ -2863,6 +3184,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
"PerPkg": "1",
@@ -2872,6 +3194,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
"PerPkg": "1",
@@ -2881,6 +3204,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
"PerPkg": "1",
@@ -2890,6 +3214,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
"PerPkg": "1",
@@ -2899,6 +3224,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
"PerPkg": "1",
@@ -2908,6 +3234,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
"PerPkg": "1",
@@ -2917,6 +3244,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
"PerPkg": "1",
@@ -2926,6 +3254,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
@@ -2935,6 +3264,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
@@ -2944,6 +3274,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
@@ -2953,6 +3284,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
@@ -2962,6 +3294,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
"PerPkg": "1",
@@ -2971,6 +3304,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
"PerPkg": "1",
@@ -2980,6 +3314,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
"PerPkg": "1",
@@ -2989,6 +3324,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
"PerPkg": "1",
@@ -2998,6 +3334,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
"PerPkg": "1",
@@ -3007,6 +3344,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
"PerPkg": "1",
@@ -3016,6 +3354,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
"PerPkg": "1",
@@ -3025,6 +3364,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
"PerPkg": "1",
@@ -3034,6 +3374,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
"PerPkg": "1",
@@ -3043,6 +3384,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
"PerPkg": "1",
@@ -3052,6 +3394,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
"PerPkg": "1",
@@ -3061,6 +3404,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
"PerPkg": "1",
@@ -3070,6 +3414,7 @@
},
{
"BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3079,6 +3424,7 @@
},
{
"BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
"PerPkg": "1",
@@ -3088,6 +3434,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3097,6 +3444,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
"PerPkg": "1",
@@ -3106,6 +3454,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
"PerPkg": "1",
@@ -3115,6 +3464,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local InvItoE Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
"PerPkg": "1",
@@ -3124,6 +3474,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote InvItoE Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
"PerPkg": "1",
@@ -3133,6 +3484,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local Read Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
"PerPkg": "1",
@@ -3142,6 +3494,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote Read Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
"PerPkg": "1",
@@ -3151,6 +3504,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Local Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
"PerPkg": "1",
@@ -3160,6 +3514,7 @@
},
{
"BriefDescription": "Tracker Occupancy Accumulator; Remote Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
"PerPkg": "1",
@@ -3169,6 +3524,7 @@
},
{
"BriefDescription": "Data Pending Occupancy Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -3178,6 +3534,7 @@
},
{
"BriefDescription": "Data Pending Occupancy Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -3187,6 +3544,7 @@
},
{
"BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_H_TxR_AD.HOM",
"PerPkg": "1",
@@ -3196,6 +3554,7 @@
},
{
"BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3205,6 +3564,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3214,6 +3574,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3223,6 +3584,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3232,6 +3594,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3241,6 +3604,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3250,6 +3614,7 @@
},
{
"BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.ALL",
"PerPkg": "1",
@@ -3259,6 +3624,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3268,6 +3634,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3277,6 +3644,7 @@
},
{
"BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3286,6 +3654,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3295,6 +3664,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3304,6 +3674,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3313,6 +3684,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3322,6 +3694,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3331,6 +3704,7 @@
},
{
"BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.ALL",
"PerPkg": "1",
@@ -3340,6 +3714,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3349,6 +3724,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3358,6 +3734,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CACHE",
"PerPkg": "1",
@@ -3367,6 +3744,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CORE",
"PerPkg": "1",
@@ -3376,6 +3754,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_QPI",
"PerPkg": "1",
@@ -3385,6 +3764,7 @@
},
{
"BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -3394,6 +3774,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -3403,6 +3784,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -3412,6 +3794,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -3421,6 +3804,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -3430,6 +3814,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -3439,6 +3824,7 @@
},
{
"BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.ALL",
"PerPkg": "1",
@@ -3448,6 +3834,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
"PerPkg": "1",
@@ -3457,6 +3844,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
"PerPkg": "1",
@@ -3466,6 +3854,7 @@
},
{
"BriefDescription": "Injection Starvation; For AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_H_TxR_STARVED.AK",
"PerPkg": "1",
@@ -3475,6 +3864,7 @@
},
{
"BriefDescription": "Injection Starvation; For BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_H_TxR_STARVED.BL",
"PerPkg": "1",
@@ -3484,6 +3874,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -3493,6 +3884,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -3502,6 +3894,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -3511,6 +3904,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -3520,6 +3914,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -3529,6 +3924,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -3538,6 +3934,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -3547,6 +3944,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
index bef1f5ef6f31..121de411d312 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
+ "Counter": "0,1,2,3",
"EventName": "QPI_CTL_BANDWIDTH_TX",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of data flits transmitted . Derived from unc_q_txl_flits_g0.data",
+ "Counter": "0,1,2,3",
"EventName": "QPI_DATA_BANDWIDTH_TX",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Number of clocks in the IRP.",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
"PerPkg": "1",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.CRD",
"PerPkg": "1",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.DRD",
"PerPkg": "1",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
"PerPkg": "1",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
"PerPkg": "1",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Coherent Ops; PCIItoM",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCITOM",
"PerPkg": "1",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "Coherent Ops; RFO",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.RFO",
"PerPkg": "1",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Coherent Ops; WbMtoI",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
"PerPkg": "1",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
"PerPkg": "1",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
"PerPkg": "1",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
"PerPkg": "1",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_REJ",
"PerPkg": "1",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_REQ",
"PerPkg": "1",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_XFER",
"PerPkg": "1",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
"PerPkg": "1",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.PF_TIMEOUT",
"PerPkg": "1",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.DATA_THROTTLE",
"PerPkg": "1",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "Misc Events - Set 1",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.LOST_FWD",
"PerPkg": "1",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
"PerPkg": "1",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
"PerPkg": "1",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_E",
"PerPkg": "1",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_I",
"PerPkg": "1",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_M",
"PerPkg": "1",
@@ -251,6 +279,7 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_S",
"PerPkg": "1",
@@ -260,6 +289,7 @@
},
{
"BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
"EventCode": "0xA",
"EventName": "UNC_I_RxR_AK_INSERTS",
"PerPkg": "1",
@@ -268,6 +298,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
@@ -276,6 +307,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
@@ -284,6 +316,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
@@ -292,6 +325,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
@@ -300,6 +334,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
@@ -308,6 +343,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
@@ -316,6 +352,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
@@ -324,6 +361,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
@@ -332,6 +370,7 @@
},
{
"BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
@@ -340,6 +379,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
"PerPkg": "1",
@@ -349,6 +389,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
"PerPkg": "1",
@@ -358,6 +399,7 @@
},
{
"BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
"PerPkg": "1",
@@ -367,6 +409,7 @@
},
{
"BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.MISS",
"PerPkg": "1",
@@ -376,6 +419,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
"PerPkg": "1",
@@ -385,6 +429,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
"PerPkg": "1",
@@ -394,6 +439,7 @@
},
{
"BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
"PerPkg": "1",
@@ -403,6 +449,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.ATOMIC",
"PerPkg": "1",
@@ -412,6 +459,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.OTHER",
"PerPkg": "1",
@@ -421,6 +469,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.RD_PREF",
"PerPkg": "1",
@@ -430,6 +479,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.READS",
"PerPkg": "1",
@@ -439,6 +489,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
"PerPkg": "1",
@@ -448,6 +499,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -457,6 +509,7 @@
},
{
"BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -465,6 +518,7 @@
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -473,6 +527,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xE",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
"PerPkg": "1",
@@ -481,6 +536,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
"PerPkg": "1",
@@ -489,6 +545,7 @@
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0xD",
"EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
"PerPkg": "1",
@@ -497,6 +554,7 @@
},
{
"BriefDescription": "Number of qfclks",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
@@ -505,6 +563,7 @@
},
{
"BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_CTO_COUNT",
"PerPkg": "1",
@@ -513,6 +572,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
"PerPkg": "1",
@@ -522,6 +582,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
"PerPkg": "1",
@@ -531,6 +592,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
"PerPkg": "1",
@@ -540,6 +602,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
"PerPkg": "1",
@@ -549,6 +612,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
"PerPkg": "1",
@@ -558,6 +622,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
"PerPkg": "1",
@@ -567,6 +632,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
"PerPkg": "1",
@@ -576,6 +642,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
"PerPkg": "1",
@@ -585,6 +652,7 @@
},
{
"BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -593,6 +661,7 @@
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -601,6 +670,7 @@
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -609,6 +679,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_BYPASSED",
"PerPkg": "1",
@@ -617,6 +688,7 @@
},
{
"BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
"PerPkg": "1",
@@ -626,6 +698,7 @@
},
{
"BriefDescription": "CRC Errors Detected; Normal Operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
"PerPkg": "1",
@@ -635,6 +708,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
"PerPkg": "1",
@@ -644,6 +718,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
"PerPkg": "1",
@@ -653,6 +728,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
"PerPkg": "1",
@@ -662,6 +738,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
"PerPkg": "1",
@@ -671,6 +748,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
"PerPkg": "1",
@@ -680,6 +758,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
"PerPkg": "1",
@@ -689,6 +768,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
"PerPkg": "1",
@@ -698,6 +778,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
"PerPkg": "1",
@@ -707,6 +788,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
"PerPkg": "1",
@@ -716,6 +798,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
"PerPkg": "1",
@@ -725,6 +808,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
"PerPkg": "1",
@@ -734,6 +818,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
"PerPkg": "1",
@@ -743,6 +828,7 @@
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
"PerPkg": "1",
@@ -751,6 +837,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_CYCLES_NE",
"PerPkg": "1",
@@ -759,6 +846,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
"PerPkg": "1",
@@ -768,6 +856,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
"PerPkg": "1",
@@ -777,6 +866,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
"PerPkg": "1",
@@ -786,6 +876,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
"PerPkg": "1",
@@ -795,6 +886,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
"PerPkg": "1",
@@ -804,6 +896,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
"PerPkg": "1",
@@ -813,6 +906,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
"PerPkg": "1",
@@ -822,6 +916,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
"PerPkg": "1",
@@ -831,6 +926,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
"PerPkg": "1",
@@ -840,6 +936,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
"PerPkg": "1",
@@ -849,6 +946,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
"PerPkg": "1",
@@ -858,6 +956,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
"PerPkg": "1",
@@ -867,6 +966,7 @@
},
{
"BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
"PerPkg": "1",
@@ -876,6 +976,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS",
"PerPkg": "1",
@@ -885,6 +986,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
@@ -894,6 +996,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
@@ -903,6 +1006,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM",
"PerPkg": "1",
@@ -912,6 +1016,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
@@ -921,6 +1026,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
@@ -930,6 +1036,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.SNP",
"PerPkg": "1",
@@ -939,6 +1046,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB",
"PerPkg": "1",
@@ -948,6 +1056,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
@@ -957,6 +1066,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
@@ -966,6 +1076,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCS",
"PerPkg": "1",
@@ -975,6 +1086,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
@@ -984,6 +1096,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
@@ -993,6 +1106,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_Q_RxL_INSERTS",
"PerPkg": "1",
@@ -1001,6 +1115,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
"PerPkg": "1",
@@ -1010,6 +1125,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
"PerPkg": "1",
@@ -1019,6 +1135,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
"PerPkg": "1",
@@ -1028,6 +1145,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
"PerPkg": "1",
@@ -1037,6 +1155,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
"PerPkg": "1",
@@ -1046,6 +1165,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
"PerPkg": "1",
@@ -1055,6 +1175,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
"PerPkg": "1",
@@ -1064,6 +1185,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
"PerPkg": "1",
@@ -1073,6 +1195,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
"PerPkg": "1",
@@ -1082,6 +1205,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
"PerPkg": "1",
@@ -1091,6 +1215,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
"PerPkg": "1",
@@ -1100,6 +1225,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
"PerPkg": "1",
@@ -1109,6 +1235,7 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_OCCUPANCY",
"PerPkg": "1",
@@ -1117,6 +1244,7 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
"PerPkg": "1",
@@ -1126,6 +1254,7 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
"PerPkg": "1",
@@ -1135,6 +1264,7 @@
},
{
"BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
"PerPkg": "1",
@@ -1144,6 +1274,7 @@
},
{
"BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
"PerPkg": "1",
@@ -1153,6 +1284,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
"PerPkg": "1",
@@ -1162,6 +1294,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
"PerPkg": "1",
@@ -1171,6 +1304,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
"PerPkg": "1",
@@ -1180,6 +1314,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
"PerPkg": "1",
@@ -1189,6 +1324,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
"PerPkg": "1",
@@ -1198,6 +1334,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
"PerPkg": "1",
@@ -1207,6 +1344,7 @@
},
{
"BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
"PerPkg": "1",
@@ -1216,6 +1354,7 @@
},
{
"BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
"PerPkg": "1",
@@ -1225,6 +1364,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
"PerPkg": "1",
@@ -1234,6 +1374,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
"PerPkg": "1",
@@ -1243,6 +1384,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
"PerPkg": "1",
@@ -1252,6 +1394,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
"PerPkg": "1",
@@ -1261,6 +1404,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
"PerPkg": "1",
@@ -1270,6 +1414,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
"PerPkg": "1",
@@ -1279,6 +1424,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
"PerPkg": "1",
@@ -1288,6 +1434,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.GV",
"PerPkg": "1",
@@ -1297,6 +1444,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
"PerPkg": "1",
@@ -1306,6 +1454,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
"PerPkg": "1",
@@ -1315,6 +1464,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
"PerPkg": "1",
@@ -1324,6 +1474,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
"PerPkg": "1",
@@ -1333,6 +1484,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
"PerPkg": "1",
@@ -1342,6 +1494,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
"PerPkg": "1",
@@ -1351,6 +1504,7 @@
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -1359,6 +1513,7 @@
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_TxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -1367,6 +1522,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_Q_TxL_BYPASSED",
"PerPkg": "1",
@@ -1375,6 +1531,7 @@
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
"PerPkg": "1",
@@ -1384,6 +1541,7 @@
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
"PerPkg": "1",
@@ -1393,6 +1551,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_Q_TxL_CYCLES_NE",
"PerPkg": "1",
@@ -1401,6 +1560,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
@@ -1409,6 +1569,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
@@ -1417,6 +1578,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency.",
@@ -1425,6 +1587,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
@@ -1433,6 +1596,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
@@ -1441,6 +1605,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits transmitted over QPI on the home channel.",
@@ -1449,6 +1614,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits transmitted over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
@@ -1457,6 +1623,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request transmitted over QPI on the home channel. This basically counts the number of remote memory requests transmitted over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
@@ -1465,6 +1632,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits transmitted over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are transmitted on the home channel.",
@@ -1473,6 +1641,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB",
"PerPkg": "1",
@@ -1482,6 +1651,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
@@ -1491,6 +1661,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
@@ -1500,6 +1671,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCS",
"PerPkg": "1",
@@ -1509,6 +1681,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
@@ -1518,6 +1691,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
@@ -1527,6 +1701,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_Q_TxL_INSERTS",
"PerPkg": "1",
@@ -1535,6 +1710,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_Q_TxL_OCCUPANCY",
"PerPkg": "1",
@@ -1543,6 +1719,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1552,6 +1729,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1561,6 +1739,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1570,6 +1749,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1579,6 +1759,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1588,6 +1769,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1597,6 +1779,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1606,6 +1789,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1615,6 +1799,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1624,6 +1809,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1633,6 +1819,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1642,6 +1829,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1651,6 +1839,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
"PerPkg": "1",
@@ -1659,6 +1848,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
"PerPkg": "1",
@@ -1667,6 +1857,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1676,6 +1867,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1685,6 +1877,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
"PerPkg": "1",
@@ -1694,6 +1887,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1703,6 +1897,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1712,6 +1907,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
"PerPkg": "1",
@@ -1721,6 +1917,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1730,6 +1927,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1739,6 +1937,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1748,6 +1947,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1757,6 +1957,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1766,6 +1967,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1775,6 +1977,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1784,6 +1987,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1793,6 +1997,7 @@
},
{
"BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_Q_VNA_CREDIT_RETURNS",
"PerPkg": "1",
@@ -1801,6 +2006,7 @@
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
"PerPkg": "1",
@@ -1809,6 +2015,7 @@
},
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
"EventCode": "0x1",
"EventName": "UNC_R3_CLOCKTICKS",
"PerPkg": "1",
@@ -1817,6 +2024,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
"PerPkg": "1",
@@ -1826,6 +2034,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
"PerPkg": "1",
@@ -1835,6 +2044,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
"PerPkg": "1",
@@ -1844,6 +2054,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
"PerPkg": "1",
@@ -1853,6 +2064,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14_16",
"PerPkg": "1",
@@ -1862,6 +2074,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
"PerPkg": "1",
@@ -1871,6 +2084,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
"PerPkg": "1",
@@ -1880,6 +2094,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO_15_17",
"PerPkg": "1",
@@ -1889,6 +2104,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
"PerPkg": "1",
@@ -1898,6 +2114,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
"PerPkg": "1",
@@ -1907,6 +2124,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
"PerPkg": "1",
@@ -1916,6 +2134,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
"PerPkg": "1",
@@ -1925,6 +2144,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
"PerPkg": "1",
@@ -1934,6 +2154,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
"PerPkg": "1",
@@ -1943,6 +2164,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
"PerPkg": "1",
@@ -1952,6 +2174,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
"PerPkg": "1",
@@ -1961,6 +2184,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
"PerPkg": "1",
@@ -1970,6 +2194,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
"PerPkg": "1",
@@ -1979,6 +2204,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
"PerPkg": "1",
@@ -1988,6 +2214,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
"PerPkg": "1",
@@ -1997,6 +2224,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0xB",
"EventName": "UNC_R3_IOT_BACKPRESSURE.HUB",
"PerPkg": "1",
@@ -2005,6 +2233,7 @@
},
{
"BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
"EventCode": "0xB",
"EventName": "UNC_R3_IOT_BACKPRESSURE.SAT",
"PerPkg": "1",
@@ -2013,6 +2242,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0xD",
"EventName": "UNC_R3_IOT_CTS_HI.CTS2",
"PerPkg": "1",
@@ -2022,6 +2252,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
"EventCode": "0xD",
"EventName": "UNC_R3_IOT_CTS_HI.CTS3",
"PerPkg": "1",
@@ -2031,6 +2262,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0xC",
"EventName": "UNC_R3_IOT_CTS_LO.CTS0",
"PerPkg": "1",
@@ -2040,6 +2272,7 @@
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
"EventCode": "0xC",
"EventName": "UNC_R3_IOT_CTS_LO.CTS1",
"PerPkg": "1",
@@ -2049,6 +2282,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
@@ -2058,6 +2292,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
@@ -2067,6 +2302,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
@@ -2076,6 +2312,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2085,6 +2322,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2094,6 +2332,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2103,6 +2342,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2112,6 +2352,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2121,6 +2362,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2130,6 +2372,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2139,6 +2382,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2148,6 +2392,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2157,6 +2402,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2166,6 +2412,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2175,6 +2422,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2184,6 +2432,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
@@ -2193,6 +2442,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
@@ -2202,6 +2452,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
@@ -2211,6 +2462,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2220,6 +2472,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2229,6 +2482,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2238,6 +2492,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2247,6 +2502,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -2256,6 +2512,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2265,6 +2522,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -2274,6 +2532,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW",
"PerPkg": "1",
@@ -2283,6 +2542,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -2292,6 +2552,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -2301,6 +2562,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -2310,6 +2572,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2319,6 +2582,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -2328,6 +2592,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW",
"PerPkg": "1",
@@ -2337,6 +2602,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -2346,6 +2612,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -2355,6 +2622,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -2364,6 +2632,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -2373,6 +2642,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -2382,6 +2652,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW",
"PerPkg": "1",
@@ -2391,6 +2662,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -2400,6 +2672,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -2409,6 +2682,7 @@
},
{
"BriefDescription": "R3 IV Ring in Use; Any",
+ "Counter": "0,1,2",
"EventCode": "0xA",
"EventName": "UNC_R3_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -2418,6 +2692,7 @@
},
{
"BriefDescription": "R3 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0xA",
"EventName": "UNC_R3_RING_IV_USED.CW",
"PerPkg": "1",
@@ -2427,6 +2702,7 @@
},
{
"BriefDescription": "Ring Stop Starved; AK",
+ "Counter": "0,1,2",
"EventCode": "0xE",
"EventName": "UNC_R3_RING_SINK_STARVED.AK",
"PerPkg": "1",
@@ -2436,6 +2712,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
"PerPkg": "1",
@@ -2445,6 +2722,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
"PerPkg": "1",
@@ -2454,6 +2732,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
"PerPkg": "1",
@@ -2463,6 +2742,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.DRS",
"PerPkg": "1",
@@ -2472,6 +2752,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.HOM",
"PerPkg": "1",
@@ -2481,6 +2762,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCB",
"PerPkg": "1",
@@ -2490,6 +2772,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCS",
"PerPkg": "1",
@@ -2499,6 +2782,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NDR",
"PerPkg": "1",
@@ -2508,6 +2792,7 @@
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.SNP",
"PerPkg": "1",
@@ -2517,6 +2802,7 @@
},
{
"BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.DRS",
"PerPkg": "1",
@@ -2526,6 +2812,7 @@
},
{
"BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.HOM",
"PerPkg": "1",
@@ -2535,6 +2822,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCB",
"PerPkg": "1",
@@ -2544,6 +2832,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCS",
"PerPkg": "1",
@@ -2553,6 +2842,7 @@
},
{
"BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NDR",
"PerPkg": "1",
@@ -2562,6 +2852,7 @@
},
{
"BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.SNP",
"PerPkg": "1",
@@ -2571,6 +2862,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; DRS",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.DRS",
"PerPkg": "1",
@@ -2580,6 +2872,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; HOM",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.HOM",
"PerPkg": "1",
@@ -2589,6 +2882,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NCB",
"PerPkg": "1",
@@ -2598,6 +2892,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NCS",
"PerPkg": "1",
@@ -2607,6 +2902,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; NDR",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NDR",
"PerPkg": "1",
@@ -2616,6 +2912,7 @@
},
{
"BriefDescription": "VN1 Ingress Allocations; SNP",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.SNP",
"PerPkg": "1",
@@ -2625,6 +2922,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; DRS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.DRS",
"PerPkg": "1",
@@ -2634,6 +2932,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; HOM",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.HOM",
"PerPkg": "1",
@@ -2643,6 +2942,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NCB",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCB",
"PerPkg": "1",
@@ -2652,6 +2952,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NCS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCS",
"PerPkg": "1",
@@ -2661,6 +2962,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NDR",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NDR",
"PerPkg": "1",
@@ -2670,6 +2972,7 @@
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; SNP",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.SNP",
"PerPkg": "1",
@@ -2679,6 +2982,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2688,6 +2992,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2697,6 +3002,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2706,6 +3012,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2715,6 +3022,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -2724,6 +3032,7 @@
},
{
"BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -2733,6 +3042,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x2B",
"EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -2742,6 +3052,7 @@
},
{
"BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x2B",
"EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -2751,6 +3062,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
@@ -2760,6 +3072,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
@@ -2769,6 +3082,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
@@ -2778,6 +3092,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
@@ -2787,6 +3102,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_AD",
"PerPkg": "1",
@@ -2796,6 +3112,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_AK",
"PerPkg": "1",
@@ -2805,6 +3122,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_BL",
"PerPkg": "1",
@@ -2814,6 +3132,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_AD",
"PerPkg": "1",
@@ -2823,6 +3142,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_AK",
"PerPkg": "1",
@@ -2832,6 +3152,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_BL",
"PerPkg": "1",
@@ -2841,6 +3162,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -2850,6 +3172,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -2859,6 +3182,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -2868,6 +3192,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -2877,6 +3202,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -2886,6 +3212,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -2895,6 +3222,7 @@
},
{
"BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -2904,6 +3232,7 @@
},
{
"BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
"PerPkg": "1",
@@ -2913,6 +3242,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -2922,6 +3252,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -2931,6 +3262,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
"PerPkg": "1",
@@ -2940,6 +3272,7 @@
},
{
"BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
"PerPkg": "1",
@@ -2949,6 +3282,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -2958,6 +3292,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -2967,6 +3302,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -2976,6 +3312,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -2985,6 +3322,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -2994,6 +3332,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -3003,6 +3342,7 @@
},
{
"BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -3012,6 +3352,7 @@
},
{
"BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
"PerPkg": "1",
@@ -3021,6 +3362,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -3030,6 +3372,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -3039,6 +3382,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
"PerPkg": "1",
@@ -3048,6 +3392,7 @@
},
{
"BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
"PerPkg": "1",
@@ -3057,6 +3402,7 @@
},
{
"BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -3066,6 +3412,7 @@
},
{
"BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -3075,6 +3422,7 @@
},
{
"BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -3084,6 +3432,7 @@
},
{
"BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -3093,6 +3442,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -3102,6 +3452,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -3111,6 +3462,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -3120,6 +3472,7 @@
},
{
"BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -3129,6 +3482,7 @@
},
{
"BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_S_BOUNCE_CONTROL",
"PerPkg": "1",
@@ -3136,12 +3490,14 @@
},
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_S_CLOCKTICKS",
"PerPkg": "1",
"Unit": "SBOX"
},
{
"BriefDescription": "FaST wire asserted",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_S_FAST_ASSERTED",
"PerPkg": "1",
@@ -3150,6 +3506,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN",
"PerPkg": "1",
@@ -3159,6 +3516,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Event",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -3168,6 +3526,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN_ODD",
"PerPkg": "1",
@@ -3177,6 +3536,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP",
"PerPkg": "1",
@@ -3186,6 +3546,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP_EVEN",
"PerPkg": "1",
@@ -3195,6 +3556,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP_ODD",
"PerPkg": "1",
@@ -3204,6 +3566,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN",
"PerPkg": "1",
@@ -3213,6 +3576,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Event",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -3222,6 +3586,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN_ODD",
"PerPkg": "1",
@@ -3231,6 +3596,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP",
"PerPkg": "1",
@@ -3240,6 +3606,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP_EVEN",
"PerPkg": "1",
@@ -3249,6 +3616,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP_ODD",
"PerPkg": "1",
@@ -3258,6 +3626,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN",
"PerPkg": "1",
@@ -3267,6 +3636,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Event",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -3276,6 +3646,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN_ODD",
"PerPkg": "1",
@@ -3285,6 +3656,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP",
"PerPkg": "1",
@@ -3294,6 +3666,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP_EVEN",
"PerPkg": "1",
@@ -3303,6 +3676,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP_ODD",
"PerPkg": "1",
@@ -3312,6 +3686,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.AD_CACHE",
"PerPkg": "1",
@@ -3320,6 +3695,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.AK_CORE",
"PerPkg": "1",
@@ -3328,6 +3704,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.BL_CORE",
"PerPkg": "1",
@@ -3336,6 +3713,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.IV_CORE",
"PerPkg": "1",
@@ -3344,6 +3722,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_S_RING_IV_USED.DN",
"PerPkg": "1",
@@ -3353,6 +3732,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_S_RING_IV_USED.UP",
"PerPkg": "1",
@@ -3362,6 +3742,7 @@
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.AD_CACHE",
"PerPkg": "1",
@@ -3370,6 +3751,7 @@
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.AK_CORE",
"PerPkg": "1",
@@ -3378,6 +3760,7 @@
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.BL_CORE",
"PerPkg": "1",
@@ -3386,6 +3769,7 @@
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.IV_CORE",
"PerPkg": "1",
@@ -3394,6 +3778,7 @@
},
{
"BriefDescription": "Injection Starvation; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.AD_BNC",
"PerPkg": "1",
@@ -3403,6 +3788,7 @@
},
{
"BriefDescription": "Injection Starvation; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.AD_CRD",
"PerPkg": "1",
@@ -3412,6 +3798,7 @@
},
{
"BriefDescription": "Injection Starvation; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.BL_BNC",
"PerPkg": "1",
@@ -3421,6 +3808,7 @@
},
{
"BriefDescription": "Injection Starvation; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.BL_CRD",
"PerPkg": "1",
@@ -3430,6 +3818,7 @@
},
{
"BriefDescription": "Bypass; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AD_BNC",
"PerPkg": "1",
@@ -3439,6 +3828,7 @@
},
{
"BriefDescription": "Bypass; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AD_CRD",
"PerPkg": "1",
@@ -3448,6 +3838,7 @@
},
{
"BriefDescription": "Bypass; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AK",
"PerPkg": "1",
@@ -3457,6 +3848,7 @@
},
{
"BriefDescription": "Bypass; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.BL_BNC",
"PerPkg": "1",
@@ -3466,6 +3858,7 @@
},
{
"BriefDescription": "Bypass; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.BL_CRD",
"PerPkg": "1",
@@ -3475,6 +3868,7 @@
},
{
"BriefDescription": "Bypass; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.IV",
"PerPkg": "1",
@@ -3484,6 +3878,7 @@
},
{
"BriefDescription": "Injection Starvation; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AD_BNC",
"PerPkg": "1",
@@ -3493,6 +3888,7 @@
},
{
"BriefDescription": "Injection Starvation; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AD_CRD",
"PerPkg": "1",
@@ -3502,6 +3898,7 @@
},
{
"BriefDescription": "Injection Starvation; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AK",
"PerPkg": "1",
@@ -3511,6 +3908,7 @@
},
{
"BriefDescription": "Injection Starvation; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.BL_BNC",
"PerPkg": "1",
@@ -3520,6 +3918,7 @@
},
{
"BriefDescription": "Injection Starvation; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.BL_CRD",
"PerPkg": "1",
@@ -3529,6 +3928,7 @@
},
{
"BriefDescription": "Injection Starvation; IVF Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.IFV",
"PerPkg": "1",
@@ -3538,6 +3938,7 @@
},
{
"BriefDescription": "Injection Starvation; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.IV",
"PerPkg": "1",
@@ -3547,6 +3948,7 @@
},
{
"BriefDescription": "Ingress Allocations; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AD_BNC",
"PerPkg": "1",
@@ -3556,6 +3958,7 @@
},
{
"BriefDescription": "Ingress Allocations; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AD_CRD",
"PerPkg": "1",
@@ -3565,6 +3968,7 @@
},
{
"BriefDescription": "Ingress Allocations; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AK",
"PerPkg": "1",
@@ -3574,6 +3978,7 @@
},
{
"BriefDescription": "Ingress Allocations; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.BL_BNC",
"PerPkg": "1",
@@ -3583,6 +3988,7 @@
},
{
"BriefDescription": "Ingress Allocations; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.BL_CRD",
"PerPkg": "1",
@@ -3592,6 +3998,7 @@
},
{
"BriefDescription": "Ingress Allocations; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.IV",
"PerPkg": "1",
@@ -3601,6 +4008,7 @@
},
{
"BriefDescription": "Ingress Occupancy; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AD_BNC",
"PerPkg": "1",
@@ -3610,6 +4018,7 @@
},
{
"BriefDescription": "Ingress Occupancy; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AD_CRD",
"PerPkg": "1",
@@ -3619,6 +4028,7 @@
},
{
"BriefDescription": "Ingress Occupancy; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AK",
"PerPkg": "1",
@@ -3628,6 +4038,7 @@
},
{
"BriefDescription": "Ingress Occupancy; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.BL_BNC",
"PerPkg": "1",
@@ -3637,6 +4048,7 @@
},
{
"BriefDescription": "Ingress Occupancy; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.BL_CRD",
"PerPkg": "1",
@@ -3646,6 +4058,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.IV",
"PerPkg": "1",
@@ -3655,6 +4068,7 @@
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.AD",
"PerPkg": "1",
@@ -3663,6 +4077,7 @@
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.AK",
"PerPkg": "1",
@@ -3671,6 +4086,7 @@
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.BL",
"PerPkg": "1",
@@ -3679,6 +4095,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AD_BNC",
"PerPkg": "1",
@@ -3688,6 +4105,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AD_CRD",
"PerPkg": "1",
@@ -3697,6 +4115,7 @@
},
{
"BriefDescription": "Egress Allocations; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AK",
"PerPkg": "1",
@@ -3706,6 +4125,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.BL_BNC",
"PerPkg": "1",
@@ -3715,6 +4135,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.BL_CRD",
"PerPkg": "1",
@@ -3724,6 +4145,7 @@
},
{
"BriefDescription": "Egress Allocations; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.IV",
"PerPkg": "1",
@@ -3733,6 +4155,7 @@
},
{
"BriefDescription": "Egress Occupancy; AD - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AD_BNC",
"PerPkg": "1",
@@ -3742,6 +4165,7 @@
},
{
"BriefDescription": "Egress Occupancy; AD - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AD_CRD",
"PerPkg": "1",
@@ -3751,6 +4175,7 @@
},
{
"BriefDescription": "Egress Occupancy; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AK",
"PerPkg": "1",
@@ -3760,6 +4185,7 @@
},
{
"BriefDescription": "Egress Occupancy; BL - Bounces",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.BL_BNC",
"PerPkg": "1",
@@ -3769,6 +4195,7 @@
},
{
"BriefDescription": "Egress Occupancy; BL - Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.BL_CRD",
"PerPkg": "1",
@@ -3778,6 +4205,7 @@
},
{
"BriefDescription": "Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.IV",
"PerPkg": "1",
@@ -3787,6 +4215,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AD Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.AD",
"PerPkg": "1",
@@ -3796,6 +4225,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.AK",
"PerPkg": "1",
@@ -3805,6 +4235,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.BL",
"PerPkg": "1",
@@ -3814,6 +4245,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.IV",
"PerPkg": "1",
@@ -3823,12 +4255,14 @@
},
{
"BriefDescription": "UNC_U_CLOCKTICKS",
+ "Counter": "0,1",
"EventName": "UNC_U_CLOCKTICKS",
"PerPkg": "1",
"Unit": "UBOX"
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
"PerPkg": "1",
@@ -3838,6 +4272,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.DISABLE",
"PerPkg": "1",
@@ -3847,6 +4282,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.ENABLE",
"PerPkg": "1",
@@ -3856,6 +4292,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
"PerPkg": "1",
@@ -3865,6 +4302,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
"PerPkg": "1",
@@ -3874,6 +4312,7 @@
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
"PerPkg": "1",
@@ -3883,6 +4322,7 @@
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
"PerPkg": "1",
@@ -3891,6 +4331,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.CMC",
"PerPkg": "1",
@@ -3900,6 +4341,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
"PerPkg": "1",
@@ -3909,6 +4351,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LTERROR",
"PerPkg": "1",
@@ -3918,6 +4361,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
"PerPkg": "1",
@@ -3927,6 +4371,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
"PerPkg": "1",
@@ -3936,6 +4381,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.OTHER",
"PerPkg": "1",
@@ -3945,6 +4391,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.TRAP",
"PerPkg": "1",
@@ -3954,6 +4401,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.UMC",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json
index bd64a8a1625f..84d1d601ea95 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_R2_CLOCKTICKS",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
"PerPkg": "1",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
"PerPkg": "1",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
"PerPkg": "1",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -77,6 +86,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -86,6 +96,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -95,6 +106,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -104,6 +116,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -113,6 +126,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -122,6 +136,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW",
"PerPkg": "1",
@@ -131,6 +146,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -140,6 +156,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -149,6 +166,7 @@
},
{
"BriefDescription": "AK Ingress Bounced; Dn",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_R2_RING_AK_BOUNCES.DN",
"PerPkg": "1",
@@ -158,6 +176,7 @@
},
{
"BriefDescription": "AK Ingress Bounced; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_R2_RING_AK_BOUNCES.UP",
"PerPkg": "1",
@@ -167,6 +186,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -176,6 +196,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -185,6 +206,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -194,6 +216,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW",
"PerPkg": "1",
@@ -203,6 +226,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -212,6 +236,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -221,6 +246,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -230,6 +256,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -239,6 +266,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -248,6 +276,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW",
"PerPkg": "1",
@@ -257,6 +286,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -266,6 +296,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -275,6 +306,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -284,6 +316,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.CCW",
"PerPkg": "1",
@@ -293,6 +326,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.CW",
"PerPkg": "1",
@@ -302,6 +336,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
"PerPkg": "1",
@@ -311,6 +346,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
"PerPkg": "1",
@@ -320,6 +356,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R2_RxR_INSERTS.NCB",
"PerPkg": "1",
@@ -329,6 +366,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R2_RxR_INSERTS.NCS",
"PerPkg": "1",
@@ -338,6 +376,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
"PerPkg": "1",
@@ -347,6 +386,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -356,6 +396,7 @@
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -365,6 +406,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
@@ -374,6 +416,7 @@
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
@@ -383,6 +426,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
@@ -392,6 +436,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
@@ -401,6 +446,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
@@ -410,6 +456,7 @@
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
@@ -419,6 +466,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AD",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
"PerPkg": "1",
@@ -428,6 +476,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AK",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
"PerPkg": "1",
@@ -437,6 +486,7 @@
},
{
"BriefDescription": "Egress Cycles Full; BL",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
"PerPkg": "1",
@@ -446,6 +496,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AD",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AD",
"PerPkg": "1",
@@ -455,6 +506,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AK",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AK",
"PerPkg": "1",
@@ -464,6 +516,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; BL",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.BL",
"PerPkg": "1",
@@ -473,6 +526,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
"PerPkg": "1",
@@ -482,6 +536,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
"PerPkg": "1",
@@ -491,6 +546,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
"PerPkg": "1",
@@ -500,6 +556,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
"PerPkg": "1",
@@ -509,6 +566,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
"PerPkg": "1",
@@ -518,6 +576,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
index c005f5115722..9ef5eeba3ef4 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
@@ -21,6 +23,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.BYP",
"PerPkg": "1",
@@ -30,6 +33,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
"PerPkg": "1",
@@ -39,6 +43,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.WR",
"PerPkg": "1",
@@ -48,6 +53,7 @@
},
{
"BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.ACT",
"PerPkg": "1",
@@ -56,6 +62,7 @@
},
{
"BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.CAS",
"PerPkg": "1",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.PRE",
"PerPkg": "1",
@@ -72,6 +80,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -90,6 +100,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
@@ -99,6 +110,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_RMM",
"PerPkg": "1",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_WMM",
"PerPkg": "1",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
"PerPkg": "1",
@@ -142,6 +158,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
@@ -151,18 +168,21 @@
},
{
"BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_DCLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
"PerPkg": "1",
@@ -171,6 +191,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
@@ -180,6 +201,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
@@ -189,6 +211,7 @@
},
{
"BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
@@ -197,6 +220,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
"PerPkg": "1",
@@ -206,6 +230,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
"PerPkg": "1",
@@ -215,6 +240,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
@@ -224,6 +250,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
"PerPkg": "1",
@@ -233,6 +260,7 @@
},
{
"BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
"PerPkg": "1",
@@ -241,6 +269,7 @@
},
{
"BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"PerPkg": "1",
@@ -249,6 +278,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
"PerPkg": "1",
@@ -258,6 +288,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
"PerPkg": "1",
@@ -267,6 +298,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
"PerPkg": "1",
@@ -276,6 +308,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
"PerPkg": "1",
@@ -285,6 +318,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
"PerPkg": "1",
@@ -294,6 +328,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
"PerPkg": "1",
@@ -303,6 +338,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
"PerPkg": "1",
@@ -312,6 +348,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
"PerPkg": "1",
@@ -321,6 +358,7 @@
},
{
"BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
"PerPkg": "1",
@@ -329,6 +367,7 @@
},
{
"BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M_POWER_PCU_THROTTLING",
"PerPkg": "1",
@@ -336,6 +375,7 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"PerPkg": "1",
@@ -344,6 +384,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
"PerPkg": "1",
@@ -353,6 +394,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
"PerPkg": "1",
@@ -362,6 +404,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
"PerPkg": "1",
@@ -371,6 +414,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
"PerPkg": "1",
@@ -380,6 +424,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
"PerPkg": "1",
@@ -389,6 +434,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
"PerPkg": "1",
@@ -398,6 +444,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
"PerPkg": "1",
@@ -407,6 +454,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
"PerPkg": "1",
@@ -416,6 +464,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
"PerPkg": "1",
@@ -425,6 +474,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
"PerPkg": "1",
@@ -434,6 +484,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.BYP",
"PerPkg": "1",
@@ -443,6 +494,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
"PerPkg": "1",
@@ -452,6 +504,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
@@ -461,6 +514,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -470,6 +524,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
@@ -479,6 +534,7 @@
},
{
"BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.HIGH",
"PerPkg": "1",
@@ -487,6 +543,7 @@
},
{
"BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.LOW",
"PerPkg": "1",
@@ -495,6 +552,7 @@
},
{
"BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.MED",
"PerPkg": "1",
@@ -503,6 +561,7 @@
},
{
"BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.PANIC",
"PerPkg": "1",
@@ -511,6 +570,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
"PerPkg": "1",
@@ -520,6 +580,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK0",
"PerPkg": "1",
@@ -528,6 +589,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK1",
"PerPkg": "1",
@@ -537,6 +599,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK10",
"PerPkg": "1",
@@ -546,6 +609,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK11",
"PerPkg": "1",
@@ -555,6 +619,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK12",
"PerPkg": "1",
@@ -564,6 +629,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK13",
"PerPkg": "1",
@@ -573,6 +639,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK14",
"PerPkg": "1",
@@ -582,6 +649,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK15",
"PerPkg": "1",
@@ -591,6 +659,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK2",
"PerPkg": "1",
@@ -600,6 +669,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK3",
"PerPkg": "1",
@@ -609,6 +679,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK4",
"PerPkg": "1",
@@ -618,6 +689,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK5",
"PerPkg": "1",
@@ -627,6 +699,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK6",
"PerPkg": "1",
@@ -636,6 +709,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK7",
"PerPkg": "1",
@@ -645,6 +719,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK8",
"PerPkg": "1",
@@ -654,6 +729,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK9",
"PerPkg": "1",
@@ -663,6 +739,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
"PerPkg": "1",
@@ -672,6 +749,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
"PerPkg": "1",
@@ -681,6 +759,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
"PerPkg": "1",
@@ -690,6 +769,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
"PerPkg": "1",
@@ -699,6 +779,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
"PerPkg": "1",
@@ -708,6 +789,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK0",
"PerPkg": "1",
@@ -716,6 +798,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK1",
"PerPkg": "1",
@@ -725,6 +808,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK10",
"PerPkg": "1",
@@ -734,6 +818,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK11",
"PerPkg": "1",
@@ -743,6 +828,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK12",
"PerPkg": "1",
@@ -752,6 +838,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK13",
"PerPkg": "1",
@@ -761,6 +848,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK14",
"PerPkg": "1",
@@ -770,6 +858,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK15",
"PerPkg": "1",
@@ -779,6 +868,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK2",
"PerPkg": "1",
@@ -788,6 +878,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK3",
"PerPkg": "1",
@@ -797,6 +888,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK4",
"PerPkg": "1",
@@ -806,6 +898,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK5",
"PerPkg": "1",
@@ -815,6 +908,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK6",
"PerPkg": "1",
@@ -824,6 +918,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK7",
"PerPkg": "1",
@@ -833,6 +928,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK8",
"PerPkg": "1",
@@ -842,6 +938,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK9",
"PerPkg": "1",
@@ -851,6 +948,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
"PerPkg": "1",
@@ -860,6 +958,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
"PerPkg": "1",
@@ -869,6 +968,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
"PerPkg": "1",
@@ -878,6 +978,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
"PerPkg": "1",
@@ -887,6 +988,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK0",
"PerPkg": "1",
@@ -895,6 +997,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
"PerPkg": "1",
@@ -904,6 +1007,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK0",
"PerPkg": "1",
@@ -912,6 +1016,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK1",
"PerPkg": "1",
@@ -921,6 +1026,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK10",
"PerPkg": "1",
@@ -930,6 +1036,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK11",
"PerPkg": "1",
@@ -939,6 +1046,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK12",
"PerPkg": "1",
@@ -948,6 +1056,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK13",
"PerPkg": "1",
@@ -957,6 +1066,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK14",
"PerPkg": "1",
@@ -966,6 +1076,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK15",
"PerPkg": "1",
@@ -975,6 +1086,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK2",
"PerPkg": "1",
@@ -984,6 +1096,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK3",
"PerPkg": "1",
@@ -993,6 +1106,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK4",
"PerPkg": "1",
@@ -1002,6 +1116,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK5",
"PerPkg": "1",
@@ -1011,6 +1126,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK6",
"PerPkg": "1",
@@ -1020,6 +1136,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK7",
"PerPkg": "1",
@@ -1029,6 +1146,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK8",
"PerPkg": "1",
@@ -1038,6 +1156,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK9",
"PerPkg": "1",
@@ -1047,6 +1166,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
"PerPkg": "1",
@@ -1056,6 +1176,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
"PerPkg": "1",
@@ -1065,6 +1186,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
"PerPkg": "1",
@@ -1074,6 +1196,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
"PerPkg": "1",
@@ -1083,6 +1206,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
"PerPkg": "1",
@@ -1092,6 +1216,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK0",
"PerPkg": "1",
@@ -1100,6 +1225,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK1",
"PerPkg": "1",
@@ -1109,6 +1235,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK10",
"PerPkg": "1",
@@ -1118,6 +1245,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK11",
"PerPkg": "1",
@@ -1127,6 +1255,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK12",
"PerPkg": "1",
@@ -1136,6 +1265,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK13",
"PerPkg": "1",
@@ -1145,6 +1275,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK14",
"PerPkg": "1",
@@ -1154,6 +1285,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK15",
"PerPkg": "1",
@@ -1163,6 +1295,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK2",
"PerPkg": "1",
@@ -1172,6 +1305,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK3",
"PerPkg": "1",
@@ -1181,6 +1315,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK4",
"PerPkg": "1",
@@ -1190,6 +1325,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK5",
"PerPkg": "1",
@@ -1199,6 +1335,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK6",
"PerPkg": "1",
@@ -1208,6 +1345,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK7",
"PerPkg": "1",
@@ -1217,6 +1355,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK8",
"PerPkg": "1",
@@ -1226,6 +1365,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK9",
"PerPkg": "1",
@@ -1235,6 +1375,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
"PerPkg": "1",
@@ -1244,6 +1385,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
"PerPkg": "1",
@@ -1253,6 +1395,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
"PerPkg": "1",
@@ -1262,6 +1405,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
"PerPkg": "1",
@@ -1271,6 +1415,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
"PerPkg": "1",
@@ -1280,6 +1425,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK0",
"PerPkg": "1",
@@ -1288,6 +1434,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK1",
"PerPkg": "1",
@@ -1297,6 +1444,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK10",
"PerPkg": "1",
@@ -1306,6 +1454,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK11",
"PerPkg": "1",
@@ -1315,6 +1464,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK12",
"PerPkg": "1",
@@ -1324,6 +1474,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK13",
"PerPkg": "1",
@@ -1333,6 +1484,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK14",
"PerPkg": "1",
@@ -1342,6 +1494,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK15",
"PerPkg": "1",
@@ -1351,6 +1504,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK2",
"PerPkg": "1",
@@ -1360,6 +1514,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK3",
"PerPkg": "1",
@@ -1369,6 +1524,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK4",
"PerPkg": "1",
@@ -1378,6 +1534,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK5",
"PerPkg": "1",
@@ -1387,6 +1544,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK6",
"PerPkg": "1",
@@ -1396,6 +1554,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK7",
"PerPkg": "1",
@@ -1405,6 +1564,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK8",
"PerPkg": "1",
@@ -1414,6 +1574,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK9",
"PerPkg": "1",
@@ -1423,6 +1584,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
"PerPkg": "1",
@@ -1432,6 +1594,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
"PerPkg": "1",
@@ -1441,6 +1604,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
"PerPkg": "1",
@@ -1450,6 +1614,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
"PerPkg": "1",
@@ -1459,6 +1624,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
"PerPkg": "1",
@@ -1468,6 +1634,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK0",
"PerPkg": "1",
@@ -1476,6 +1643,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK1",
"PerPkg": "1",
@@ -1485,6 +1653,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK10",
"PerPkg": "1",
@@ -1494,6 +1663,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK11",
"PerPkg": "1",
@@ -1503,6 +1673,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK12",
"PerPkg": "1",
@@ -1512,6 +1683,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK13",
"PerPkg": "1",
@@ -1521,6 +1693,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK14",
"PerPkg": "1",
@@ -1530,6 +1703,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK15",
"PerPkg": "1",
@@ -1539,6 +1713,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK2",
"PerPkg": "1",
@@ -1548,6 +1723,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK3",
"PerPkg": "1",
@@ -1557,6 +1733,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK4",
"PerPkg": "1",
@@ -1566,6 +1743,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK5",
"PerPkg": "1",
@@ -1575,6 +1753,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK6",
"PerPkg": "1",
@@ -1584,6 +1763,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK7",
"PerPkg": "1",
@@ -1593,6 +1773,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK8",
"PerPkg": "1",
@@ -1602,6 +1783,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK9",
"PerPkg": "1",
@@ -1611,6 +1793,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
"PerPkg": "1",
@@ -1620,6 +1803,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
"PerPkg": "1",
@@ -1629,6 +1813,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
"PerPkg": "1",
@@ -1638,6 +1823,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
"PerPkg": "1",
@@ -1647,6 +1833,7 @@
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
"PerPkg": "1",
@@ -1655,6 +1842,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -1663,6 +1851,7 @@
},
{
"BriefDescription": "VMSE MXB write buffer occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
"PerPkg": "1",
@@ -1670,6 +1859,7 @@
},
{
"BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M_VMSE_WR_PUSH.RMM",
"PerPkg": "1",
@@ -1678,6 +1868,7 @@
},
{
"BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M_VMSE_WR_PUSH.WMM",
"PerPkg": "1",
@@ -1686,6 +1877,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
"PerPkg": "1",
@@ -1694,6 +1886,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.STARVE",
"PerPkg": "1",
@@ -1702,6 +1895,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
"PerPkg": "1",
@@ -1710,6 +1904,7 @@
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
"PerPkg": "1",
@@ -1718,6 +1913,7 @@
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
"PerPkg": "1",
@@ -1726,6 +1922,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
"PerPkg": "1",
@@ -1734,6 +1931,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
"PerPkg": "1",
@@ -1742,6 +1940,7 @@
},
{
"BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_M_WRONG_MM",
"PerPkg": "1",
@@ -1749,6 +1948,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
"PerPkg": "1",
@@ -1758,6 +1958,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK0",
"PerPkg": "1",
@@ -1766,6 +1967,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK1",
"PerPkg": "1",
@@ -1775,6 +1977,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK10",
"PerPkg": "1",
@@ -1784,6 +1987,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK11",
"PerPkg": "1",
@@ -1793,6 +1997,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK12",
"PerPkg": "1",
@@ -1802,6 +2007,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK13",
"PerPkg": "1",
@@ -1811,6 +2017,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK14",
"PerPkg": "1",
@@ -1820,6 +2027,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK15",
"PerPkg": "1",
@@ -1829,6 +2037,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK2",
"PerPkg": "1",
@@ -1838,6 +2047,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK3",
"PerPkg": "1",
@@ -1847,6 +2057,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK4",
"PerPkg": "1",
@@ -1856,6 +2067,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK5",
"PerPkg": "1",
@@ -1865,6 +2077,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK6",
"PerPkg": "1",
@@ -1874,6 +2087,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK7",
"PerPkg": "1",
@@ -1883,6 +2097,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK8",
"PerPkg": "1",
@@ -1892,6 +2107,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK9",
"PerPkg": "1",
@@ -1901,6 +2117,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
"PerPkg": "1",
@@ -1910,6 +2127,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
"PerPkg": "1",
@@ -1919,6 +2137,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
"PerPkg": "1",
@@ -1928,6 +2147,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
"PerPkg": "1",
@@ -1937,6 +2157,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
"PerPkg": "1",
@@ -1946,6 +2167,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK0",
"PerPkg": "1",
@@ -1954,6 +2176,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK1",
"PerPkg": "1",
@@ -1963,6 +2186,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK10",
"PerPkg": "1",
@@ -1972,6 +2196,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK11",
"PerPkg": "1",
@@ -1981,6 +2206,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK12",
"PerPkg": "1",
@@ -1990,6 +2216,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK13",
"PerPkg": "1",
@@ -1999,6 +2226,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK14",
"PerPkg": "1",
@@ -2008,6 +2236,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK15",
"PerPkg": "1",
@@ -2017,6 +2246,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK2",
"PerPkg": "1",
@@ -2026,6 +2256,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK3",
"PerPkg": "1",
@@ -2035,6 +2266,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK4",
"PerPkg": "1",
@@ -2044,6 +2276,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK5",
"PerPkg": "1",
@@ -2053,6 +2286,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK6",
"PerPkg": "1",
@@ -2062,6 +2296,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK7",
"PerPkg": "1",
@@ -2071,6 +2306,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK8",
"PerPkg": "1",
@@ -2080,6 +2316,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK9",
"PerPkg": "1",
@@ -2089,6 +2326,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
"PerPkg": "1",
@@ -2098,6 +2336,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
"PerPkg": "1",
@@ -2107,6 +2346,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
"PerPkg": "1",
@@ -2116,6 +2356,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
"PerPkg": "1",
@@ -2125,6 +2366,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
"PerPkg": "1",
@@ -2134,6 +2376,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK0",
"PerPkg": "1",
@@ -2142,6 +2385,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK1",
"PerPkg": "1",
@@ -2151,6 +2395,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK10",
"PerPkg": "1",
@@ -2160,6 +2405,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK11",
"PerPkg": "1",
@@ -2169,6 +2415,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK12",
"PerPkg": "1",
@@ -2178,6 +2425,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK13",
"PerPkg": "1",
@@ -2187,6 +2435,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK14",
"PerPkg": "1",
@@ -2196,6 +2445,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK15",
"PerPkg": "1",
@@ -2205,6 +2455,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK2",
"PerPkg": "1",
@@ -2214,6 +2465,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK3",
"PerPkg": "1",
@@ -2223,6 +2475,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK4",
"PerPkg": "1",
@@ -2232,6 +2485,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK5",
"PerPkg": "1",
@@ -2241,6 +2495,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK6",
"PerPkg": "1",
@@ -2250,6 +2505,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK7",
"PerPkg": "1",
@@ -2259,6 +2515,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK8",
"PerPkg": "1",
@@ -2268,6 +2525,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK9",
"PerPkg": "1",
@@ -2277,6 +2535,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
"PerPkg": "1",
@@ -2286,6 +2545,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
"PerPkg": "1",
@@ -2295,6 +2555,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
"PerPkg": "1",
@@ -2304,6 +2565,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
"PerPkg": "1",
@@ -2313,6 +2575,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
"PerPkg": "1",
@@ -2322,6 +2585,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK0",
"PerPkg": "1",
@@ -2330,6 +2594,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK1",
"PerPkg": "1",
@@ -2339,6 +2604,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK10",
"PerPkg": "1",
@@ -2348,6 +2614,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK11",
"PerPkg": "1",
@@ -2357,6 +2624,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK12",
"PerPkg": "1",
@@ -2366,6 +2634,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK13",
"PerPkg": "1",
@@ -2375,6 +2644,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK14",
"PerPkg": "1",
@@ -2384,6 +2654,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK15",
"PerPkg": "1",
@@ -2393,6 +2664,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK2",
"PerPkg": "1",
@@ -2402,6 +2674,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK3",
"PerPkg": "1",
@@ -2411,6 +2684,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK4",
"PerPkg": "1",
@@ -2420,6 +2694,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK5",
"PerPkg": "1",
@@ -2429,6 +2704,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK6",
"PerPkg": "1",
@@ -2438,6 +2714,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK7",
"PerPkg": "1",
@@ -2447,6 +2724,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK8",
"PerPkg": "1",
@@ -2456,6 +2734,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK9",
"PerPkg": "1",
@@ -2465,6 +2744,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
"PerPkg": "1",
@@ -2474,6 +2754,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
"PerPkg": "1",
@@ -2483,6 +2764,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
"PerPkg": "1",
@@ -2492,6 +2774,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
"PerPkg": "1",
@@ -2501,6 +2784,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
"PerPkg": "1",
@@ -2510,6 +2794,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK0",
"PerPkg": "1",
@@ -2518,6 +2803,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK1",
"PerPkg": "1",
@@ -2527,6 +2813,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK10",
"PerPkg": "1",
@@ -2536,6 +2823,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK11",
"PerPkg": "1",
@@ -2545,6 +2833,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK12",
"PerPkg": "1",
@@ -2554,6 +2843,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK13",
"PerPkg": "1",
@@ -2563,6 +2853,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK14",
"PerPkg": "1",
@@ -2572,6 +2863,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK15",
"PerPkg": "1",
@@ -2581,6 +2873,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK2",
"PerPkg": "1",
@@ -2590,6 +2883,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK3",
"PerPkg": "1",
@@ -2599,6 +2893,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK4",
"PerPkg": "1",
@@ -2608,6 +2903,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK5",
"PerPkg": "1",
@@ -2617,6 +2913,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK6",
"PerPkg": "1",
@@ -2626,6 +2923,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK7",
"PerPkg": "1",
@@ -2635,6 +2933,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK8",
"PerPkg": "1",
@@ -2644,6 +2943,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK9",
"PerPkg": "1",
@@ -2653,6 +2953,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
"PerPkg": "1",
@@ -2662,6 +2963,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
"PerPkg": "1",
@@ -2671,6 +2973,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
"PerPkg": "1",
@@ -2680,6 +2983,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
"PerPkg": "1",
@@ -2689,6 +2993,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
"PerPkg": "1",
@@ -2698,6 +3003,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK0",
"PerPkg": "1",
@@ -2706,6 +3012,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK1",
"PerPkg": "1",
@@ -2715,6 +3022,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK10",
"PerPkg": "1",
@@ -2724,6 +3032,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK11",
"PerPkg": "1",
@@ -2733,6 +3042,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK12",
"PerPkg": "1",
@@ -2742,6 +3052,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK13",
"PerPkg": "1",
@@ -2751,6 +3062,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK14",
"PerPkg": "1",
@@ -2760,6 +3072,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK15",
"PerPkg": "1",
@@ -2769,6 +3082,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK2",
"PerPkg": "1",
@@ -2778,6 +3092,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK3",
"PerPkg": "1",
@@ -2787,6 +3102,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK4",
"PerPkg": "1",
@@ -2796,6 +3112,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK5",
"PerPkg": "1",
@@ -2805,6 +3122,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK6",
"PerPkg": "1",
@@ -2814,6 +3132,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK7",
"PerPkg": "1",
@@ -2823,6 +3142,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK8",
"PerPkg": "1",
@@ -2832,6 +3152,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK9",
"PerPkg": "1",
@@ -2841,6 +3162,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
"PerPkg": "1",
@@ -2850,6 +3172,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
"PerPkg": "1",
@@ -2859,6 +3182,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
"PerPkg": "1",
@@ -2868,6 +3192,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
index c391325ee36b..252415937680 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -88,6 +99,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x68",
"EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -144,6 +162,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -152,6 +171,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
@@ -160,6 +180,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
@@ -168,6 +189,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_P_DEMOTIONS_CORE10",
"PerPkg": "1",
@@ -176,6 +198,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3B",
"EventName": "UNC_P_DEMOTIONS_CORE11",
"PerPkg": "1",
@@ -184,6 +207,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "UNC_P_DEMOTIONS_CORE12",
"PerPkg": "1",
@@ -192,6 +216,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_P_DEMOTIONS_CORE13",
"PerPkg": "1",
@@ -200,6 +225,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_P_DEMOTIONS_CORE14",
"PerPkg": "1",
@@ -208,6 +234,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_P_DEMOTIONS_CORE15",
"PerPkg": "1",
@@ -216,6 +243,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_P_DEMOTIONS_CORE16",
"PerPkg": "1",
@@ -224,6 +252,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_P_DEMOTIONS_CORE17",
"PerPkg": "1",
@@ -232,6 +261,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
@@ -240,6 +270,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_P_DEMOTIONS_CORE3",
"PerPkg": "1",
@@ -248,6 +279,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_P_DEMOTIONS_CORE4",
"PerPkg": "1",
@@ -256,6 +288,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_P_DEMOTIONS_CORE5",
"PerPkg": "1",
@@ -264,6 +297,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_P_DEMOTIONS_CORE6",
"PerPkg": "1",
@@ -272,6 +306,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_P_DEMOTIONS_CORE7",
"PerPkg": "1",
@@ -280,6 +315,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_P_DEMOTIONS_CORE8",
"PerPkg": "1",
@@ -288,6 +324,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_P_DEMOTIONS_CORE9",
"PerPkg": "1",
@@ -296,6 +333,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_P_FREQ_BAND0_CYCLES",
"PerPkg": "1",
@@ -304,6 +342,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_P_FREQ_BAND1_CYCLES",
"PerPkg": "1",
@@ -312,6 +351,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_P_FREQ_BAND2_CYCLES",
"PerPkg": "1",
@@ -320,6 +360,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_P_FREQ_BAND3_CYCLES",
"PerPkg": "1",
@@ -328,6 +369,7 @@
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
"PerPkg": "1",
@@ -336,6 +378,7 @@
},
{
"BriefDescription": "OS Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
"PerPkg": "1",
@@ -344,6 +387,7 @@
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
"PerPkg": "1",
@@ -352,6 +396,7 @@
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
"PerPkg": "1",
@@ -360,6 +405,7 @@
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
"PerPkg": "1",
@@ -368,6 +414,7 @@
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
@@ -376,6 +423,7 @@
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
"PerPkg": "1",
@@ -384,6 +432,7 @@
},
{
"BriefDescription": "Package C State Residency - C1E",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
"PerPkg": "1",
@@ -392,6 +441,7 @@
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
"PerPkg": "1",
@@ -400,6 +450,7 @@
},
{
"BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
"PerPkg": "1",
@@ -408,6 +459,7 @@
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
"PerPkg": "1",
@@ -416,6 +468,7 @@
},
{
"BriefDescription": "Package C7 State Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
"PerPkg": "1",
@@ -424,6 +477,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"Filter": "occ_sel=1",
@@ -433,6 +487,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"Filter": "occ_sel=2",
@@ -442,6 +497,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"Filter": "occ_sel=3",
@@ -451,6 +507,7 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"PerPkg": "1",
@@ -459,6 +516,7 @@
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
"PerPkg": "1",
@@ -467,6 +525,7 @@
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -475,6 +534,7 @@
},
{
"BriefDescription": "UNC_P_UFS_TRANSITIONS_NO_CHANGE",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "UNC_P_UFS_TRANSITIONS_NO_CHANGE",
"PerPkg": "1",
@@ -483,6 +543,7 @@
},
{
"BriefDescription": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
"PerPkg": "1",
@@ -491,6 +552,7 @@
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
index 87a4ec1ee7d7..7cf00ae0e993 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in all TLB levels that cause a page walk of any page size.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
"PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Number of cache load STLB hits. No page walk.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
"PublicDescription": "This event counts load operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
"PublicDescription": "This event counts load operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "2000003",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Completed page walks due to demand load misses that caused 2M/4M page walks in any TLB levels.",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Completed page walks due to demand load misses that caused 4K page walks in any TLB levels.",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
@@ -88,6 +99,7 @@
},
{
"BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
"PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
"PublicDescription": "This event counts store operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
"PublicDescription": "This event counts store operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
@@ -135,6 +152,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 2M/4M page structure.",
@@ -143,6 +161,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 4K page structure.",
@@ -151,6 +170,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
@@ -159,6 +179,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
@@ -166,6 +187,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
@@ -174,6 +196,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in ITLB that causes a page walk of any page size.",
@@ -182,6 +205,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "ITLB misses that hit STLB. No page walk.",
@@ -190,6 +214,7 @@
},
{
"BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_2M",
"PublicDescription": "ITLB misses that hit STLB (2M).",
@@ -198,6 +223,7 @@
},
{
"BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT_4K",
"PublicDescription": "ITLB misses that hit STLB (4K).",
@@ -206,6 +232,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Completed page walks in ITLB of any page size.",
@@ -214,6 +241,7 @@
},
{
"BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
@@ -221,6 +249,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Completed page walks due to misses in ITLB 2M/4M page entries.",
@@ -229,6 +258,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Completed page walks due to misses in ITLB 4K page entries.",
@@ -237,6 +267,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
@@ -245,6 +276,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L1+FB",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L1",
"PublicDescription": "Number of DTLB page walker loads that hit in the L1+FB.",
@@ -253,6 +285,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
@@ -261,6 +294,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "Counter": "0,1,2,3",
"Errata": "HSD25",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L3",
@@ -270,6 +304,7 @@
},
{
"BriefDescription": "Number of DTLB page walker hits in Memory",
+ "Counter": "0,1,2,3",
"Errata": "HSD25",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
@@ -279,6 +314,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
@@ -286,6 +322,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
@@ -293,6 +330,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L3",
"SampleAfterValue": "2000003",
@@ -300,6 +338,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
@@ -307,6 +346,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
@@ -314,6 +354,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
@@ -321,6 +362,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
@@ -328,6 +370,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in memory.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.EPT_ITLB_MEMORY",
"SampleAfterValue": "2000003",
@@ -335,6 +378,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
@@ -343,6 +387,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"PublicDescription": "Number of ITLB page walker loads that hit in the L2.",
@@ -351,6 +396,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "Counter": "0,1,2,3",
"Errata": "HSD25",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L3",
@@ -360,6 +406,7 @@
},
{
"BriefDescription": "Number of ITLB page walker hits in Memory",
+ "Counter": "0,1,2,3",
"Errata": "HSD25",
"EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
@@ -369,6 +416,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "DTLB flush attempts of the thread-specific entries.",
@@ -377,6 +425,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Count number of STLB flush attempts.",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/cache.json b/tools/perf/pmu-events/arch/x86/icelake/cache.json
index d26c4efe35f0..3508340acd0e 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x48",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.SILENT",
"PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3",
"EventCode": "0xf2",
"EventName": "L2_LINES_OUT.USELESS_HWPF",
"PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -92,6 +103,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -100,6 +112,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Counts demand requests that miss L2 cache.",
@@ -108,6 +121,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PublicDescription": "Counts demand requests to L2 cache.",
@@ -116,6 +130,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -180,6 +202,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -188,6 +211,7 @@
},
{
"BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_HIT",
"PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -196,6 +220,7 @@
},
{
"BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_MISS",
"PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -204,6 +229,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "Counts L2 writebacks that access L2 cache.",
@@ -212,6 +238,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -220,6 +247,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -230,6 +258,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -250,6 +280,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -260,6 +291,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -270,6 +302,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -280,6 +313,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -290,6 +324,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -300,6 +335,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
@@ -310,6 +346,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
@@ -320,6 +357,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -330,6 +368,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -340,6 +379,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or Bus Lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -350,6 +390,7 @@
},
{
"BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -360,6 +401,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -370,6 +412,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -380,6 +423,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -390,6 +434,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -400,6 +445,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -410,6 +456,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -420,6 +467,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -429,6 +477,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -438,6 +487,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -447,6 +497,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -456,6 +507,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -465,6 +517,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -474,6 +527,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -483,6 +537,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -492,6 +547,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -501,6 +557,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -510,6 +567,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -519,6 +577,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -528,6 +587,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -537,6 +597,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -546,6 +607,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -555,6 +617,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -564,6 +627,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -573,6 +637,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -582,6 +647,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -591,6 +657,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -600,6 +667,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -609,6 +677,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -618,6 +687,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -627,6 +697,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -636,6 +707,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -645,6 +717,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -654,6 +727,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -663,6 +737,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -672,6 +747,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -681,6 +757,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -690,6 +767,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -699,6 +777,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -708,6 +787,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -717,6 +797,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L3.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -726,6 +807,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -735,6 +817,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -744,6 +827,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -753,6 +837,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -762,6 +847,7 @@
},
{
"BriefDescription": "Counts streaming stores that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -771,6 +857,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -779,6 +866,7 @@
},
{
"BriefDescription": "Counts memory transactions sent to the uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions sent to the uncore including requests initiated by the core, all L3 prefetches, reads resulting from page walks, and snoop responses.",
@@ -787,6 +875,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -795,6 +884,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -803,6 +893,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of outstanding data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding data read requests pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -811,6 +902,7 @@
},
{
"BriefDescription": "Cycles where at least 1 outstanding data read request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -820,6 +912,7 @@
},
{
"BriefDescription": "Cycles where at least 1 outstanding Demand RFO request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -829,6 +922,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -837,6 +931,7 @@
},
{
"BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
@@ -845,6 +940,7 @@
},
{
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.BUS_LOCK",
"PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
@@ -853,6 +949,7 @@
},
{
"BriefDescription": "Cycles the queue waiting for offcore responses is full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SQ_FULL",
"PublicDescription": "Counts the cycles for which the thread is active and the queue waiting for responses from the uncore cannot take any more entries.",
@@ -860,7 +957,16 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
@@ -869,6 +975,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
@@ -877,6 +984,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T0",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
@@ -885,6 +993,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/counter.json b/tools/perf/pmu-events/arch/x86/icelake/counter.json
new file mode 100644
index 000000000000..5a350072522a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelake/counter.json
@@ -0,0 +1,17 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "ARB",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "CLOCK",
+ "CountersNumFixed": 1,
+ "CountersNumGeneric": "0"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
index 85c26c889088..61ddce0c8db6 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.FP",
"PublicDescription": "Counts all microcode Floating Point assists.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/frontend.json b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
index 2b539a08d2bf..e7c7d4d4152d 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xab",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -57,6 +63,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -68,6 +75,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -79,6 +87,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -90,6 +99,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -101,6 +111,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -112,6 +123,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -123,6 +135,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -134,6 +147,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -145,6 +159,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -156,6 +171,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -167,6 +183,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -178,6 +195,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -189,6 +207,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -200,6 +219,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -211,6 +231,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -222,6 +243,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_DATA.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_DATA.STALLS]",
@@ -230,6 +252,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_HIT",
"PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
@@ -238,6 +261,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_MISS",
"PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
@@ -246,6 +270,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
@@ -254,6 +279,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_DATA.STALLS",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_16B.IFDATA_STALL]",
@@ -262,6 +288,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
@@ -270,6 +297,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -279,15 +307,17 @@
},
{
"BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
@@ -296,6 +326,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_ANY",
@@ -305,6 +336,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_OK",
@@ -314,6 +346,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -322,6 +355,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES_ANY",
@@ -331,6 +365,7 @@
},
{
"BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -341,6 +376,7 @@
},
{
"BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
@@ -349,6 +385,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
@@ -357,6 +394,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -366,6 +404,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
index f67cc73779f8..9085ea60f516 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -104,7 +104,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
@@ -114,7 +114,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -135,7 +135,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
"MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_branch_instructions",
"MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6",
"ScaleUnit": "100%"
@@ -143,7 +143,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -181,7 +181,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(29 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -201,7 +201,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -219,7 +219,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -250,13 +250,13 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -265,7 +265,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -274,7 +274,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "32.5 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -283,7 +283,7 @@
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -296,7 +296,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -338,7 +338,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -347,7 +347,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -385,7 +385,7 @@
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_thread_slots",
- "MetricGroup": "Default;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -405,7 +405,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -462,6 +462,27 @@
},
{
"BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "tma_info_botlnk_l0_core_bound_likely",
+ "MetricGroup": "Cor;Metric;SMT",
+ "MetricName": "tma_info_botlnk_core_bound_likely",
+ "MetricThreshold": "tma_info_botlnk_core_bound_likely > 0.5"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd))",
+ "MetricGroup": "DSBmiss;Fed;Scaled_Slots;tma_issueFB",
+ "MetricName": "tma_info_botlnk_dsb_misses",
+ "MetricThreshold": "tma_info_botlnk_dsb_misses > 10"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;Scaled_Slots;tma_issueFL",
+ "MetricName": "tma_info_botlnk_ic_misses",
+ "MetricThreshold": "tma_info_botlnk_ic_misses > 5"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
"MetricGroup": "Cor;SMT",
@@ -469,13 +490,21 @@
"MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
@@ -487,39 +516,33 @@
"PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
- "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5"
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
@@ -527,23 +550,23 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
@@ -551,8 +574,8 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
@@ -560,7 +583,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
@@ -569,18 +592,25 @@
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -638,7 +668,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -655,7 +685,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 5 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
@@ -721,7 +751,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -816,12 +846,24 @@
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 11",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
+ },
+ {
+ "BriefDescription": "\"Bus lock\" per kilo instruction",
+ "MetricExpr": "tma_info_memory_mix_bus_lock_pki",
+ "MetricGroup": "Mem;Metric",
+ "MetricName": "tma_info_memory_bus_lock_pki"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki",
+ "MetricGroup": "Fed;MemoryTLB;Metric",
+ "MetricName": "tma_info_memory_code_stlb_mpki"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -848,18 +890,30 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "tma_info_memory_latency_data_l2_mlp",
+ "MetricGroup": "Memory_BW;Metric;Offcore",
+ "MetricName": "tma_info_memory_data_l2_mlp"
+ },
+ {
"BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_fb_hpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l1d_cache_fill_bw",
+ "MetricGroup": "Core_Metric;Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t"
+ },
+ {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -872,12 +926,18 @@
"MetricName": "tma_info_memory_l1mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l2_cache_fill_bw",
+ "MetricGroup": "Core_Metric;Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l2_cache_fill_bw_2t"
+ },
+ {
"BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -902,18 +962,36 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * L2_RQSTS.RFO_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
"MetricName": "tma_info_memory_l3_cache_access_bw"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l3_cache_access_bw",
+ "MetricGroup": "Core_Metric;Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_memory_l3_cache_access_bw_2t"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l3_cache_fill_bw",
+ "MetricGroup": "Core_Metric;Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l3_cache_fill_bw_2t"
+ },
+ {
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -927,7 +1005,7 @@
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricExpr": "tma_info_memory_load_l2_miss_latency",
"MetricGroup": "Memory_Lat;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
@@ -944,12 +1022,36 @@
"MetricName": "tma_info_memory_latency_load_l3_miss_latency"
},
{
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Clocks_Latency;Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricGroup": "Memory_BW;Metric;Offcore",
+ "MetricName": "tma_info_memory_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x0@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Clocks_Latency;Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_load_l3_miss_latency"
+ },
+ {
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
"MetricName": "tma_info_memory_load_miss_real_latency"
},
{
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "tma_info_memory_tlb_load_stlb_mpki",
+ "MetricGroup": "Mem;MemoryTLB;Metric",
+ "MetricName": "tma_info_memory_load_stlb_mpki"
+ },
+ {
"BriefDescription": "\"Bus lock\" per kilo instruction",
"MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -957,7 +1059,7 @@
},
{
"BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
+ "MetricExpr": "tma_info_memory_uc_load_pki",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_mix_uc_load_pki"
},
@@ -969,6 +1071,19 @@
"PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "tma_info_memory_tlb_page_walks_utilization",
+ "MetricGroup": "Core_Metric;Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_page_walks_utilization",
+ "MetricThreshold": "tma_info_memory_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki",
+ "MetricGroup": "Mem;MemoryTLB;Metric",
+ "MetricName": "tma_info_memory_store_stlb_mpki"
+ },
+ {
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
"MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
"MetricGroup": "Fed;MemoryTLB",
@@ -994,12 +1109,36 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Un-cacheable retired load per kilo instruction",
+ "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;Metric",
+ "MetricName": "tma_info_memory_uc_load_pki"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from LSD per cycle",
+ "MetricExpr": "LSD.UOPS / LSD.CYCLES_ACTIVE",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_lsd"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
@@ -1021,13 +1160,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1151,7 +1290,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1160,7 +1299,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -1176,10 +1315,19 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -1198,7 +1346,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricExpr": "9 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -1210,7 +1358,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
@@ -1255,7 +1403,7 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
@@ -1270,7 +1418,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -1280,7 +1428,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1289,7 +1437,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -1326,7 +1474,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -1370,7 +1518,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -1389,7 +1537,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1397,7 +1545,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1449,7 +1597,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
+ "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -1477,7 +1625,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
@@ -1487,7 +1635,7 @@
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -1497,7 +1645,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -1534,7 +1682,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1562,7 +1710,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -1605,7 +1753,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/memory.json b/tools/perf/pmu-events/arch/x86/icelake/memory.json
index f84763220549..f73035f44330 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
"PublicDescription": "Counts the number of times HLE abort was triggered.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MEM",
"PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Number of times an HLE execution successfully committed",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.COMMIT",
"PublicDescription": "Counts the number of times HLE commit succeeded.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.START",
"PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -85,6 +95,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -97,6 +108,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -109,6 +121,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -121,6 +134,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -133,6 +147,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -145,6 +160,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -157,6 +173,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -169,6 +186,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -178,6 +196,7 @@
},
{
"BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -187,6 +206,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -196,6 +216,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -205,6 +226,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -214,6 +236,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -223,6 +246,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -232,6 +256,7 @@
},
{
"BriefDescription": "Counts streaming stores that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -241,6 +266,7 @@
},
{
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -248,6 +274,7 @@
},
{
"BriefDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
@@ -257,6 +284,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "1",
@@ -266,6 +294,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -274,6 +303,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -282,6 +312,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
@@ -290,6 +321,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -298,6 +330,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Counts the number of times RTM commit succeeded.",
@@ -306,6 +339,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
@@ -314,6 +348,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -322,6 +357,7 @@
},
{
"BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -330,6 +366,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_READ",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
@@ -338,6 +375,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
@@ -346,6 +384,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
@@ -354,6 +393,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
@@ -362,6 +402,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
@@ -370,6 +411,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
@@ -378,6 +420,7 @@
},
{
"BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
@@ -386,6 +429,7 @@
},
{
"BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/metricgroups.json b/tools/perf/pmu-events/arch/x86/icelake/metricgroups.json
index 5452a1448ded..3a88260194d1 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/metricgroups.json
@@ -5,7 +5,20 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/other.json b/tools/perf/pmu-events/arch/x86/icelake/other.json
index 4fdc87339555..a96b2a989d3f 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
"PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
"PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -61,6 +68,7 @@
},
{
"BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -70,6 +78,7 @@
},
{
"BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -79,6 +88,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -88,6 +98,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -97,6 +108,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -115,6 +128,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -142,6 +158,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -151,6 +168,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -160,6 +178,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -169,6 +188,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -178,6 +198,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -187,6 +208,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -196,6 +218,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -205,6 +228,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -214,6 +238,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -223,6 +248,7 @@
},
{
"BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -232,6 +258,7 @@
},
{
"BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
index c7313fd4fdf4..4fdf07c7beb7 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_ACTIVE",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.ANY",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -115,6 +128,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -142,6 +158,7 @@
},
{
"BriefDescription": "Mispredicted indirect CALL instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -151,6 +168,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -160,6 +178,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -169,6 +188,7 @@
},
{
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
@@ -185,6 +206,7 @@
},
{
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -193,6 +215,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -200,6 +223,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
@@ -208,6 +232,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -215,6 +240,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -238,6 +266,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "16",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -246,6 +275,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -254,6 +284,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -262,6 +293,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "20",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -270,6 +302,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -278,6 +311,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -286,6 +320,7 @@
},
{
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -302,6 +338,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -310,6 +347,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
@@ -319,6 +357,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
@@ -327,6 +366,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -335,6 +375,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -343,6 +384,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -351,6 +393,7 @@
},
{
"BriefDescription": "Number of all retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBS": "1",
@@ -359,6 +402,7 @@
},
{
"BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
@@ -367,6 +411,7 @@
},
{
"BriefDescription": "Cycles without actually retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.STALL_CYCLES",
@@ -377,6 +422,7 @@
},
{
"BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
@@ -386,6 +432,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -396,6 +443,7 @@
},
{
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
@@ -404,6 +452,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -412,6 +461,7 @@
},
{
"BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.UOP_DROPPING",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
@@ -420,6 +470,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -428,6 +479,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -436,6 +488,7 @@
},
{
"BriefDescription": "False dependencies due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies due to partial compare on address.",
@@ -444,6 +497,7 @@
},
{
"BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -452,6 +506,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -461,6 +516,7 @@
},
{
"BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
@@ -470,6 +526,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -478,6 +535,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -488,6 +546,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -496,6 +555,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR to be enabled properly.",
@@ -504,6 +564,7 @@
},
{
"BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.PAUSE_INST",
"PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
@@ -512,6 +573,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -520,6 +582,7 @@
},
{
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"SampleAfterValue": "100003",
@@ -527,6 +590,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5e",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
@@ -535,6 +599,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -546,6 +611,7 @@
},
{
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
@@ -554,6 +620,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -561,6 +628,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -569,6 +637,7 @@
},
{
"BriefDescription": "Number of uops decoded out of instructions exclusively fetched by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UOPS_DECODED.DEC0",
"PublicDescription": "Uops exclusively fetched by decoder 0",
@@ -577,6 +646,7 @@
},
{
"BriefDescription": "Number of uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_0",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
@@ -585,6 +655,7 @@
},
{
"BriefDescription": "Number of uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_1",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
@@ -593,6 +664,7 @@
},
{
"BriefDescription": "Number of uops executed on port 2 and 3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_2_3",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
@@ -601,6 +673,7 @@
},
{
"BriefDescription": "Number of uops executed on port 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
@@ -609,6 +682,7 @@
},
{
"BriefDescription": "Number of uops executed on port 5",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_5",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
@@ -617,6 +691,7 @@
},
{
"BriefDescription": "Number of uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_6",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
@@ -625,6 +700,7 @@
},
{
"BriefDescription": "Number of uops executed on port 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
@@ -633,6 +709,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Counts the number of uops executed from any thread.",
@@ -641,6 +718,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -650,6 +728,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -659,6 +738,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -668,6 +748,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -677,6 +758,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
@@ -686,6 +768,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
@@ -695,6 +778,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
@@ -704,6 +788,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
@@ -713,6 +798,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -723,6 +809,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
@@ -730,6 +817,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -738,6 +826,7 @@
},
{
"BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -746,6 +835,7 @@
},
{
"BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -756,6 +846,7 @@
},
{
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to 'Mixing Intel AVX and Intel SSE Code' section of the Optimization Guide.",
@@ -764,6 +855,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "Counts the retirement slots used each cycle.",
@@ -772,6 +864,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -782,6 +875,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "10",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json
index 8027590f1776..909a73d7f2d3 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.",
+ "Counter": "1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -8,55 +9,73 @@
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_IFA_OCCUPANCY.ALL",
+ "Counter": "0",
+ "Deprecated": "1",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "This event is deprecated.",
+ "Counter": "0",
+ "Deprecated": "1",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_TRK_OCCUPANCY.RD",
+ "Counter": "0",
+ "Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches",
+ "Counter": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "This event is deprecated.",
+ "Counter": "0",
+ "Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "Counter": "0",
+ "Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -64,9 +83,12 @@
"Unit": "ARB"
},
{
- "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Counter": "0,1",
+ "Deprecated": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
diff --git a/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json b/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json
index c6596ba09195..cc8110ac020c 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "UNC_CLOCK.SOCKET",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
index b28f62ce1f39..3ff51040f84f 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -116,6 +130,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/cache.json b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
index 3bdc56a75097..0cbb9d6a3ec1 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x48",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by the L2 cache due to L2 cache fills. Evicted lines are delivered to the L3, which may or may not cache them, according to system load and priorities.",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.SILENT",
"PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -92,6 +103,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Counts demand requests that miss L2 cache.",
@@ -100,6 +112,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -108,6 +121,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -116,6 +130,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_HIT",
"PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_MISS",
"PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "Counts L2 writebacks that access L2 cache.",
@@ -180,6 +202,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -188,6 +211,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -196,6 +220,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -206,6 +231,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -216,6 +242,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -226,6 +253,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -236,6 +264,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -246,6 +275,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -256,6 +286,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -266,6 +297,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -276,6 +308,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
@@ -286,6 +319,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Deprecated": "1",
"EventCode": "0xd2",
@@ -296,6 +330,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"Deprecated": "1",
"EventCode": "0xd2",
@@ -306,6 +341,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -316,6 +352,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -326,6 +363,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
@@ -336,6 +374,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
@@ -346,6 +385,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
@@ -355,6 +395,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
@@ -365,6 +406,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources was remote HITM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
@@ -375,6 +417,7 @@
},
{
"BriefDescription": "Retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
@@ -385,6 +428,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or Bus Lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -395,6 +439,7 @@
},
{
"BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -405,6 +450,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -415,6 +461,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -425,6 +472,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -435,6 +483,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -445,6 +494,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -455,6 +505,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -465,6 +516,7 @@
},
{
"BriefDescription": "Retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
@@ -475,6 +527,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -484,6 +537,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -493,6 +547,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -502,6 +557,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -511,6 +567,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -520,6 +577,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -529,6 +587,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -538,6 +597,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -547,6 +607,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -556,6 +617,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -565,6 +627,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -574,6 +637,7 @@
},
{
"BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -583,6 +647,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -592,6 +657,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -601,6 +667,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -610,6 +677,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -619,6 +687,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -628,6 +697,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L3.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -637,6 +707,7 @@
},
{
"BriefDescription": "Counts hardware and software prefetches to all cache levels that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PREFETCHES.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -646,6 +717,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -655,6 +727,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -664,6 +737,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -673,6 +747,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -682,6 +757,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -691,6 +767,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -700,6 +777,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -709,6 +787,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -718,6 +797,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -727,6 +807,7 @@
},
{
"BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -736,6 +817,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -744,6 +826,7 @@
},
{
"BriefDescription": "Counts memory transactions sent to the uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions sent to the uncore including requests initiated by the core, all L3 prefetches, reads resulting from page walks, and snoop responses.",
@@ -752,6 +835,7 @@
},
{
"BriefDescription": "Counts cacheable and non-cacheable code reads to the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Counts both cacheable and non-cacheable code reads to the core.",
@@ -760,6 +844,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -768,6 +853,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -776,6 +862,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of outstanding data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding data read requests pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -784,6 +871,7 @@
},
{
"BriefDescription": "Cycles where at least 1 outstanding data read request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -793,6 +881,7 @@
},
{
"BriefDescription": "Cycles with outstanding code read requests pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
@@ -802,6 +891,7 @@
},
{
"BriefDescription": "Cycles where at least 1 outstanding Demand RFO request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -811,6 +901,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of outstanding code read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding code read requests pending. Code Read requests include both cacheable and non-cacheable Code Reads. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -819,6 +910,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -827,6 +919,7 @@
},
{
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.BUS_LOCK",
"PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
@@ -835,6 +928,7 @@
},
{
"BriefDescription": "Cycles the queue waiting for offcore responses is full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SQ_FULL",
"PublicDescription": "Counts the cycles for which the thread is active and the queue waiting for responses from the uncore cannot take any more entries.",
@@ -842,7 +936,16 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
@@ -851,6 +954,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
@@ -859,6 +963,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T0",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
@@ -867,6 +972,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/counter.json b/tools/perf/pmu-events/arch/x86/icelakex/counter.json
new file mode 100644
index 000000000000..63657e0a51a3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/counter.json
@@ -0,0 +1,57 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2M",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M3UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": 1,
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
index 85c26c889088..61ddce0c8db6 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.FP",
"PublicDescription": "Counts all microcode Floating Point assists.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
index 66669d062e68..d79ddc15b220 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xab",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -57,6 +63,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -68,6 +75,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -79,6 +87,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -90,6 +99,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -101,6 +111,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -112,6 +123,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -123,6 +135,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -134,6 +147,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -145,6 +159,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -156,6 +171,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -167,6 +183,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -178,6 +195,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -189,6 +207,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -200,6 +219,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -211,6 +231,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -222,6 +243,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_DATA.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_DATA.STALLS]",
@@ -230,6 +252,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_HIT",
"PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
@@ -238,6 +261,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_MISS",
"PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
@@ -246,6 +270,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
@@ -254,6 +279,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_DATA.STALLS",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_16B.IFDATA_STALL]",
@@ -262,6 +288,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
@@ -270,6 +297,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -279,6 +307,7 @@
},
{
"BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
@@ -288,6 +317,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
@@ -296,6 +326,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_ANY",
@@ -305,6 +336,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_OK",
@@ -314,6 +346,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -322,6 +355,7 @@
},
{
"BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -332,6 +366,7 @@
},
{
"BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
@@ -340,6 +375,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
@@ -348,6 +384,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -357,6 +394,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index 769ba12bef87..db5510ba9099 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -47,7 +47,7 @@
},
{
"BriefDescription": "Percentage of time spent in the active CPU power state C0",
- "MetricExpr": "tma_info_system_cpu_utilization",
+ "MetricExpr": "tma_info_system_cpus_utilized",
"MetricName": "cpu_utilization",
"ScaleUnit": "100%"
},
@@ -73,12 +73,36 @@
"ScaleUnit": "1per_instr"
},
{
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_local",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from a remote CPU socket.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_remote",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_HIT_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_write",
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_local",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to a remote CPU socket.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_remote",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions",
"MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
"MetricName": "itlb_2nd_level_large_page_mpi",
@@ -308,7 +332,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
@@ -318,7 +342,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -339,7 +363,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
"MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_branch_instructions",
"MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6",
"ScaleUnit": "100%"
@@ -347,7 +371,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -385,7 +409,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 43.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -405,7 +429,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "43.5 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -423,7 +447,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -454,13 +478,13 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -469,7 +493,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -478,7 +502,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "48 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -487,7 +511,7 @@
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -500,7 +524,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -542,7 +566,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -551,7 +575,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -589,7 +613,7 @@
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_thread_slots",
- "MetricGroup": "Default;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -609,7 +633,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -666,24 +690,6 @@
},
{
"BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
- "MetricExpr": "(100 * (1 - max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots - (CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * (topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots)) / (((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots - (CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * (topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots)) * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots - (CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * (topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots)) < (((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots - (CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * (topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / slots)) * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * 
EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
- "MetricGroup": "Cor;SMT",
- "MetricName": "tma_info_botlnk_core_bound_likely"
- },
- {
- "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "100 * (100 * ((5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / slots * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 10 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(3 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) + max(0, topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots - (5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / slots) * ((IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2) / ((IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2 + (IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2)))",
- "MetricGroup": "DSBmiss;Fed",
- "MetricName": "tma_info_botlnk_dsb_misses"
- },
- {
- "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.",
- "MetricExpr": "100 * (100 * ((5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / slots * (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD) / (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 10 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(3 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD)))",
- "MetricGroup": "Fed;FetchLat;IcMiss",
- "MetricName": "tma_info_botlnk_ic_misses"
- },
- {
- "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
"MetricGroup": "Cor;SMT",
@@ -691,13 +697,21 @@
"MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
@@ -709,39 +723,33 @@
"PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
- "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5"
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
@@ -749,23 +757,23 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
@@ -773,8 +781,8 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
@@ -782,7 +790,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
@@ -791,18 +799,25 @@
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -860,7 +875,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -877,7 +892,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 5 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
@@ -937,7 +952,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -1032,24 +1047,12 @@
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 11",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
- },
- {
- "BriefDescription": "\"Bus lock\" per kilo instruction",
- "MetricExpr": "tma_info_memory_mix_bus_lock_pki",
- "MetricGroup": "Mem",
- "MetricName": "tma_info_memory_bus_lock_pki"
- },
- {
- "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki",
- "MetricGroup": "Fed;MemoryTLB",
- "MetricName": "tma_info_memory_code_stlb_mpki"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -1088,30 +1091,18 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "Average Parallel L2 cache miss data reads",
- "MetricExpr": "tma_info_memory_latency_data_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_data_l2_mlp"
- },
- {
"BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_fb_hpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -1124,30 +1115,12 @@
"MetricName": "tma_info_memory_l1mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l2_cache_fill_bw_2t"
- },
- {
- "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
- "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
- "MetricGroup": "L2Evicts;Mem;Server",
- "MetricName": "tma_info_memory_l2_evictions_nonsilent_pki"
- },
- {
- "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
- "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
- "MetricGroup": "L2Evicts;Mem;Server",
- "MetricName": "tma_info_memory_l2_evictions_silent_pki"
- },
- {
"BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -1172,30 +1145,24 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "tma_info_memory_l3_cache_access_bw"
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * L2_RQSTS.RFO_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
},
{
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / (duration_time * 1e3 / 1e3)",
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "tma_info_memory_l3_cache_access_bw_2t"
+ "MetricName": "tma_info_memory_l3_cache_access_bw"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l3_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -1209,7 +1176,7 @@
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_miss_latency",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
@@ -1221,27 +1188,9 @@
},
{
"BriefDescription": "Average Latency for L3 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l3_miss_latency",
- "MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_latency_load_l3_miss_latency"
- },
- {
- "BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_load_l2_miss_latency"
- },
- {
- "BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_load_l2_mlp"
- },
- {
- "BriefDescription": "Average Latency for L3 cache miss demand Loads",
"MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_load_l3_miss_latency"
+ "MetricName": "tma_info_memory_latency_load_l3_miss_latency"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
@@ -1250,12 +1199,6 @@
"MetricName": "tma_info_memory_load_miss_real_latency"
},
{
- "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_load_stlb_mpki",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_load_stlb_mpki"
- },
- {
"BriefDescription": "\"Bus lock\" per kilo instruction",
"MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -1263,7 +1206,7 @@
},
{
"BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "tma_info_memory_uc_load_pki",
+ "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_mix_uc_load_pki"
},
@@ -1275,18 +1218,6 @@
"PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (2 * (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD))",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_page_walks_utilization"
- },
- {
- "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_store_stlb_mpki"
- },
- {
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
"MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
"MetricGroup": "Fed;MemoryTLB",
@@ -1312,18 +1243,24 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
- "MetricGroup": "Mem",
- "MetricName": "tma_info_memory_uc_load_pki"
- },
- {
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
@@ -1345,13 +1282,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1535,7 +1472,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1544,7 +1481,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -1560,10 +1497,19 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -1582,7 +1528,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricExpr": "19 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -1594,7 +1540,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
@@ -1638,7 +1584,7 @@
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
"MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
@@ -1648,13 +1594,13 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -1664,7 +1610,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1673,7 +1619,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -1710,7 +1656,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -1754,7 +1700,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -1773,7 +1719,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1781,7 +1727,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1842,7 +1788,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
+ "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -1870,7 +1816,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
@@ -1898,7 +1844,7 @@
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -1908,7 +1854,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -1945,7 +1891,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1973,7 +1919,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -2016,7 +1962,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/memory.json b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
index 875b584b8443..32a3dedb82fb 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -29,6 +32,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -41,6 +45,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -53,6 +58,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -65,6 +71,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -77,6 +84,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -89,6 +97,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -101,6 +110,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -113,6 +123,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -122,6 +133,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -131,6 +143,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -140,6 +153,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -149,6 +163,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -158,6 +173,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by the local socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -167,6 +183,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -176,6 +193,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -185,6 +203,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L3.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -194,6 +213,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -203,6 +223,7 @@
},
{
"BriefDescription": "Counts full cacheline writes (ItoM) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ITOM.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -212,6 +233,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -221,6 +243,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -230,6 +253,7 @@
},
{
"BriefDescription": "Counts hardware and software prefetches to all cache levels that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PREFETCHES.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -239,6 +263,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -248,6 +273,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by the local socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -257,6 +283,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
"MSRIndex": "0x1a6,0x1a7",
@@ -266,6 +293,7 @@
},
{
"BriefDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -275,6 +303,7 @@
},
{
"BriefDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -284,6 +313,7 @@
},
{
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -291,6 +321,7 @@
},
{
"BriefDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
@@ -300,6 +331,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
@@ -308,6 +340,7 @@
},
{
"BriefDescription": "Cycles where the core is waiting on at least 6 outstanding demand data read requests known to have missed the L3 cache.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
@@ -317,6 +350,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "1",
@@ -326,6 +360,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -334,6 +369,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -342,6 +378,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
@@ -350,6 +387,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -358,6 +396,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Counts the number of times RTM commit succeeded.",
@@ -366,6 +405,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
@@ -374,6 +414,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -382,6 +423,7 @@
},
{
"BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -390,6 +432,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_READ",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
@@ -398,6 +441,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
@@ -406,6 +450,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json b/tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json
index 904d299c95a3..cccfcab3425e 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/metricgroups.json
@@ -5,7 +5,20 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json
index 11810daaf150..05b348d9c838 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
"PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
"PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Hit snoop reply with data, line invalidated.",
+ "Counter": "0,1,2,3",
"EventCode": "0xef",
"EventName": "CORE_SNOOP_RESPONSE.I_FWD_FE",
"PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's cache, after the data is forwarded back to the requestor and indicating the data was found unmodified in the (FE) Forward or Exclusive State in this cores caches cache. A single snoop response from the core counts on all hyperthreads of the core.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "HitM snoop reply with data, line invalidated.",
+ "Counter": "0,1,2,3",
"EventCode": "0xef",
"EventName": "CORE_SNOOP_RESPONSE.I_FWD_M",
"PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's caches, after the data is forwarded back to the requestor, and indicating the data was found modified(M) in this cores caches cache (aka HitM response). A single snoop response from the core counts on all hyperthreads of the core.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Hit snoop reply without sending the data, line invalidated.",
+ "Counter": "0,1,2,3",
"EventCode": "0xef",
"EventName": "CORE_SNOOP_RESPONSE.I_HIT_FSE",
"PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated in this core's caches without being forwarded back to the requestor. The line was in Forward, Shared or Exclusive (FSE) state in this cores caches. A single snoop response from the core counts on all hyperthreads of the core.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Line not found snoop reply",
+ "Counter": "0,1,2,3",
"EventCode": "0xef",
"EventName": "CORE_SNOOP_RESPONSE.MISS",
"PublicDescription": "Counts responses to snoops indicating that the data was not found (IHitI) in this core's caches. A single snoop response from the core counts on all hyperthreads of the Core.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Hit snoop reply with data, line kept in Shared state.",
+ "Counter": "0,1,2,3",
"EventCode": "0xef",
"EventName": "CORE_SNOOP_RESPONSE.S_FWD_FE",
"PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (FS) Forward or Shared state. A single snoop response from the core counts on all hyperthreads of the core.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "HitM snoop reply with data, line kept in Shared state",
+ "Counter": "0,1,2,3",
"EventCode": "0xef",
"EventName": "CORE_SNOOP_RESPONSE.S_FWD_M",
"PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (M)odified state. A single snoop response from the core counts on all hyperthreads of the core.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Hit snoop reply without sending the data, line kept in Shared state.",
+ "Counter": "0,1,2,3",
"EventCode": "0xef",
"EventName": "CORE_SNOOP_RESPONSE.S_HIT_FSE",
"PublicDescription": "Counts responses to snoops indicating the line was kept on this core in the (S)hared state, and that the data was found unmodified but not forwarded back to the requestor, initially the data was found in the cache in the (FSE) Forward, Shared state or Exclusive state. A single snoop response from the core counts on all hyperthreads of the core.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -90,6 +101,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -108,6 +121,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -117,6 +131,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -126,6 +141,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -135,6 +151,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -144,6 +161,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -153,6 +171,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by PMM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -162,6 +181,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -171,6 +191,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -180,6 +201,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -189,6 +211,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SNC_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -198,6 +221,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -207,6 +231,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -216,6 +241,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -225,6 +251,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.LOCAL_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -234,6 +261,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -243,6 +271,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.REMOTE_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -252,6 +281,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -261,6 +291,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SNC_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -270,6 +301,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -279,6 +311,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -288,6 +321,7 @@
},
{
"BriefDescription": "Counts hardware prefetch (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -297,6 +331,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L3.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -306,6 +341,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L3.REMOTE",
"MSRIndex": "0x1a6,0x1a7",
@@ -315,6 +351,7 @@
},
{
"BriefDescription": "Counts full cacheline writes (ItoM) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ITOM.REMOTE",
"MSRIndex": "0x1a6,0x1a7",
@@ -324,6 +361,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -333,6 +371,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -342,6 +381,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -351,6 +391,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -360,6 +401,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.LOCAL_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -369,6 +411,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -378,6 +421,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -387,6 +431,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE",
"MSRIndex": "0x1a6,0x1a7",
@@ -396,6 +441,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -405,6 +451,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
"MSRIndex": "0x1a6,0x1a7",
@@ -414,6 +461,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -423,6 +471,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -432,6 +481,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -441,6 +491,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -450,6 +501,7 @@
},
{
"BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.WRITE_ESTIMATE.MEMORY",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
index 45ee6bceba7f..74285b6c81e7 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_ACTIVE",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.ANY",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -115,6 +128,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -142,6 +158,7 @@
},
{
"BriefDescription": "Mispredicted indirect CALL instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -151,6 +168,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -160,6 +178,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -169,6 +188,7 @@
},
{
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
@@ -185,6 +206,7 @@
},
{
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -193,6 +215,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -200,6 +223,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
@@ -208,6 +232,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -215,6 +240,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -238,6 +266,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "16",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -246,6 +275,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -254,6 +284,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -262,6 +293,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "20",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -270,6 +302,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -278,6 +311,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -286,6 +320,7 @@
},
{
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -302,6 +338,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -310,6 +347,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
@@ -319,6 +357,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
@@ -327,6 +366,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -335,6 +375,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -343,6 +384,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -351,6 +393,7 @@
},
{
"BriefDescription": "Number of all retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBS": "1",
@@ -359,6 +402,7 @@
},
{
"BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
@@ -367,6 +411,7 @@
},
{
"BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
@@ -376,6 +421,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -386,6 +432,7 @@
},
{
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
@@ -394,6 +441,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -402,6 +450,7 @@
},
{
"BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.UOP_DROPPING",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
@@ -410,6 +459,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -418,6 +468,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -426,6 +477,7 @@
},
{
"BriefDescription": "False dependencies due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies due to partial compare on address.",
@@ -434,6 +486,7 @@
},
{
"BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -442,6 +495,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -451,6 +505,7 @@
},
{
"BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
@@ -460,6 +515,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -468,6 +524,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -478,6 +535,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -486,6 +544,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR to be enabled properly.",
@@ -494,6 +553,7 @@
},
{
"BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.PAUSE_INST",
"PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
@@ -502,6 +562,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -510,6 +571,7 @@
},
{
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"SampleAfterValue": "100003",
@@ -517,6 +579,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5e",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
@@ -525,6 +588,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -536,6 +600,7 @@
},
{
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
@@ -544,6 +609,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -551,6 +617,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -559,6 +626,7 @@
},
{
"BriefDescription": "Number of uops decoded out of instructions exclusively fetched by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UOPS_DECODED.DEC0",
"PublicDescription": "Uops exclusively fetched by decoder 0",
@@ -567,6 +635,7 @@
},
{
"BriefDescription": "Number of uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_0",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
@@ -575,6 +644,7 @@
},
{
"BriefDescription": "Number of uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_1",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
@@ -583,6 +653,7 @@
},
{
"BriefDescription": "Number of uops executed on port 2 and 3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_2_3",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
@@ -591,6 +662,7 @@
},
{
"BriefDescription": "Number of uops executed on port 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
@@ -599,6 +671,7 @@
},
{
"BriefDescription": "Number of uops executed on port 5",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_5",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
@@ -607,6 +680,7 @@
},
{
"BriefDescription": "Number of uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_6",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
@@ -615,6 +689,7 @@
},
{
"BriefDescription": "Number of uops executed on port 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
@@ -623,6 +698,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -632,6 +708,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -641,6 +718,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -650,6 +728,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -659,6 +738,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
@@ -668,6 +748,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
@@ -677,6 +758,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
@@ -686,6 +768,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
@@ -695,6 +778,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -705,6 +789,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
@@ -712,6 +797,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -720,6 +806,7 @@
},
{
"BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -728,6 +815,7 @@
},
{
"BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -738,6 +826,7 @@
},
{
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to 'Mixing Intel AVX and Intel SSE Code' section of the Optimization Guide.",
@@ -746,6 +835,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "Counts the retirement slots used each cycle.",
@@ -754,6 +844,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -764,6 +855,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "10",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json
index a950ba3ddcb4..8c73708befef 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json
@@ -1,80 +1,98 @@
[
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x65",
"EventName": "UNC_CHA_2LM_NM_INVITOX.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x65",
"EventName": "UNC_CHA_2LM_NM_INVITOX.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x65",
"EventName": "UNC_CHA_2LM_NM_INVITOX.SETCONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x64",
"EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x64",
"EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.SF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x64",
"EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.TOR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x70",
"EventName": "UNC_CHA_2LM_NM_SETCONFLICTS2.MEMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x70",
"EventName": "UNC_CHA_2LM_NM_SETCONFLICTS2.MEMWRNI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -82,8 +100,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -91,8 +111,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -100,8 +122,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -109,8 +133,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -118,8 +144,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -127,8 +155,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -136,8 +166,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -145,8 +177,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -154,8 +188,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -163,8 +199,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -172,8 +210,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -181,8 +221,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -190,8 +232,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -199,8 +243,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -208,8 +254,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -217,8 +265,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -226,8 +276,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -235,8 +287,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -244,8 +298,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -253,8 +309,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -262,8 +320,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -271,8 +331,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -280,8 +342,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -289,8 +353,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -298,8 +364,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -307,8 +375,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -316,8 +386,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -325,8 +397,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -334,8 +408,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -343,8 +419,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -352,8 +430,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -361,8 +441,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -370,8 +452,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -379,8 +463,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -388,8 +474,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -397,8 +485,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -406,8 +496,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -415,8 +507,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -424,8 +518,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -433,8 +529,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -442,8 +540,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -451,8 +551,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -460,8 +562,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -469,8 +573,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -478,8 +584,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -487,8 +595,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -496,8 +606,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -505,8 +617,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -514,8 +628,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -523,8 +639,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -532,8 +650,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -541,8 +661,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -550,8 +672,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -559,8 +683,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -568,8 +694,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -577,8 +705,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -586,8 +716,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -595,8 +727,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -604,8 +738,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -613,8 +749,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -622,8 +760,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -631,8 +771,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -640,8 +782,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -649,8 +793,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -658,8 +804,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -667,8 +815,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -676,8 +826,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -685,8 +837,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -694,8 +848,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -703,8 +859,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -712,8 +870,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -721,8 +881,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -730,8 +892,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -739,8 +903,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -748,8 +914,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -757,8 +925,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -766,8 +936,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -775,8 +947,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -784,8 +958,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -793,8 +969,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -802,8 +980,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -811,8 +991,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -820,8 +1002,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -829,8 +1013,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -838,8 +1024,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -847,8 +1035,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -856,8 +1046,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -865,8 +1057,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Intermediate bypass Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the intermediate bypass.",
"UMask": "0x2",
@@ -874,8 +1068,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Not Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
"UMask": "0x4",
@@ -883,8 +1079,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the full bypass.",
"UMask": "0x1",
@@ -892,12 +1090,14 @@
},
{
"BriefDescription": "Clockticks of the uncore caching and home agent (CHA)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_CHA_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_CHA_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -905,8 +1105,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xf2",
@@ -914,8 +1116,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Any Single Snoop : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xf1",
@@ -923,8 +1127,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x42",
@@ -932,8 +1138,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x41",
@@ -941,8 +1149,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x82",
@@ -950,8 +1160,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x81",
@@ -959,8 +1171,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x22",
@@ -968,8 +1182,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x21",
@@ -977,8 +1193,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.REMOTE_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x12",
@@ -986,8 +1204,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.REMOTE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x11",
@@ -995,104 +1215,130 @@
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counter 0 Occupancy : Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Multi-socket cacheline directory state lookups : Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi-socket cacheline directory state lookups : Snoop Not Needed : Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to. : Filters for transactions that did not have to send any snoops because the directory was clean.",
"UMask": "0x2",
@@ -1100,8 +1346,10 @@
},
{
"BriefDescription": "Multi-socket cacheline directory state lookups : Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi-socket cacheline directory state lookups : Snoop Needed : Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to. : Filters for transactions that had to send one or more snoops because the directory was not clean.",
"UMask": "0x1",
@@ -1109,6 +1357,7 @@
},
{
"BriefDescription": "Multi-socket cacheline directory state updates; memory write due to directory update from the home agent (HA) pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.HA",
"PerPkg": "1",
@@ -1118,6 +1367,7 @@
},
{
"BriefDescription": "Multi-socket cacheline directory state updates; memory write due to directory update from (table of requests) TOR pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.TOR",
"PerPkg": "1",
@@ -1127,8 +1377,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
"UMask": "0x4",
@@ -1136,8 +1388,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
"UMask": "0x8",
@@ -1145,8 +1399,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
"UMask": "0x40",
@@ -1154,8 +1410,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
"UMask": "0x80",
@@ -1163,8 +1421,10 @@
},
{
"BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x2",
@@ -1172,8 +1432,10 @@
},
{
"BriefDescription": "Distress signal asserted : PMM Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.PMM_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : PMM Local : Counts the number of cycles either the local or incoming distress signals are asserted. : If the CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
"UMask": "0x10",
@@ -1181,8 +1443,10 @@
},
{
"BriefDescription": "Distress signal asserted : PMM Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : PMM Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : If another CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
"UMask": "0x20",
@@ -1190,8 +1454,10 @@
},
{
"BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x1",
@@ -1199,8 +1465,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -1208,8 +1476,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -1217,8 +1487,10 @@
},
{
"BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*).",
"UMask": "0x1",
@@ -1226,8 +1498,10 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket ownership read requests that hit in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts Number of Hits in HitMe Cache : Remote socket ownership read requests that hit in S state. : Shared hit and op is RdInvOwn, RdInv, Inv*",
"UMask": "0x4",
@@ -1235,16 +1509,20 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket WBMtoE requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket writeback to I or S requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts Number of Hits in HitMe Cache : Remote socket writeback to I or S requests : op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
"UMask": "0x10",
@@ -1252,8 +1530,10 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed : Remote socket read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts Number of times HitMe Cache is accessed : Remote socket read requests : op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
"UMask": "0x1",
@@ -1261,8 +1541,10 @@
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed : Remote socket write (i.e. writeback) requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts Number of times HitMe Cache is accessed : Remote socket write (i.e. writeback) requests : op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
"UMask": "0x2",
@@ -1270,8 +1552,10 @@
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests that are not to shared line",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests that are not to shared line : No SF/LLC HitS/F and op is RdInvOwn",
"UMask": "0x40",
@@ -1279,8 +1563,10 @@
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket read or invalidate requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts Number of Misses in HitMe Cache : Remote socket read or invalidate requests : op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
"UMask": "0x80",
@@ -1288,8 +1574,10 @@
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests to shared line",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests to shared line : SF/LLC HitS/F and op is RdInvOwn",
"UMask": "0x20",
@@ -1297,16 +1585,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Deallocate HitME$ on Reads without RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request : Received RspFwdI* for a local request, but converted HitME$ to SF entry",
"UMask": "0x1",
@@ -1314,16 +1606,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request : Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
"UMask": "0x2",
@@ -1331,16 +1627,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache to SHARed",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1348,8 +1648,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1357,8 +1659,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1366,8 +1670,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1375,8 +1681,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1384,8 +1692,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1393,8 +1703,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1402,8 +1714,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1411,8 +1725,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1420,8 +1736,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1429,8 +1747,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1438,8 +1758,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1447,8 +1769,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1456,8 +1780,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1465,8 +1791,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1474,8 +1802,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1483,8 +1813,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -1492,8 +1824,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -1501,6 +1835,7 @@
},
{
"BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
"PerPkg": "1",
@@ -1510,8 +1845,10 @@
},
{
"BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "HA to iMC Reads Issued : ISOCH : Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
"UMask": "0x2",
@@ -1519,6 +1856,7 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
"PerPkg": "1",
@@ -1528,8 +1866,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x4",
@@ -1537,8 +1877,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x2",
@@ -1546,8 +1888,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x8",
@@ -1555,8 +1899,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x1fffff",
@@ -1564,8 +1910,10 @@
},
{
"BriefDescription": "Cache Lookups : All transactions from Remote Agents",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All transactions from Remote Agents : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1e20ff",
@@ -1573,34 +1921,42 @@
},
{
"BriefDescription": "Cache Lookups : All Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local or remote transaction to the LLC, including prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1bd0ff",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x19d0ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Code Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Code Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd0ff",
@@ -1608,16 +1964,20 @@
},
{
"BriefDescription": "Cache Lookups : CRd Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : CRd Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x19d0ff",
@@ -1625,8 +1985,10 @@
},
{
"BriefDescription": "Cache Lookups : Code Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Code Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd001",
@@ -1634,8 +1996,10 @@
},
{
"BriefDescription": "Cache Lookups : CRd Requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1a10ff",
@@ -1643,32 +2007,39 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1a10ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Local request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Local request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local transaction to the LLC, including prefetches from the Core",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1bc1ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
@@ -1678,25 +2049,31 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1fc1ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Data Read Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Read transactions.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x19c1ff",
@@ -1704,8 +2081,10 @@
},
{
"BriefDescription": "Cache Lookups : Data Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bc101",
@@ -1713,8 +2092,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Requests that come from a Remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1a01ff",
@@ -1722,17 +2103,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DMND_READ_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x841ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Exclusive State",
"UMask": "0x20",
@@ -1740,8 +2125,10 @@
},
{
"BriefDescription": "Cache Lookups : F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : F State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Forward State",
"UMask": "0x80",
@@ -1749,8 +2136,10 @@
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1a44ff",
@@ -1758,8 +2147,10 @@
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1844ff",
@@ -1767,8 +2158,10 @@
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1a04ff",
@@ -1776,16 +2169,20 @@
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush or Invalidate Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : I State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Miss",
"UMask": "0x1",
@@ -1793,8 +2190,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Prefetch requests to the LLC that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x189dff",
@@ -1802,42 +2201,52 @@
},
{
"BriefDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local LLC prefetch to the LLC",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LLC_PF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x189dff",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOC_HOM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xbdfff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Transactions homed locally Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Transactions homed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOC_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
"UMask": "0xbdfff",
@@ -1845,8 +2254,10 @@
},
{
"BriefDescription": "Cache Lookups : M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : M State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Modified State",
"UMask": "0x40",
@@ -1854,8 +2265,10 @@
},
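The entries above suggest that, within the wide UNC_CHA_LLC_LOOKUP unit masks, the low byte selects the line state to match (0x1 = I/miss, 0x20 = E, 0x40 = M, 0x80 = F, with 0xff matching any state) while the upper bits select the request type. The decoding sketch below is restricted to the bits actually documented in this file; the remaining state bits are reported as unknown rather than guessed.

    # Sketch: decode the low "state" byte of an UNC_CHA_LLC_LOOKUP UMask using
    # only the bit assignments visible in this file (I=0x01, E=0x20, M=0x40,
    # F=0x80).  Other state bits exist but are not spelled out here.
    KNOWN_STATE_BITS = {0x01: "I (miss)", 0x20: "E", 0x40: "M", 0x80: "F"}

    def decode_state_byte(umask):
        low = int(umask, 16) & 0xFF
        if low == 0xFF:
            return ["any state"]
        names = [name for bit, name in KNOWN_STATE_BITS.items() if low & bit]
        leftover = low & ~sum(KNOWN_STATE_BITS)
        if leftover:
            names.append(f"unknown bits 0x{leftover:x}")
        return names

    for umask in ("0x1bd901", "0x1bc101", "0x1fffff", "0x1bd0ff"):
        print(umask, "->", decode_state_byte(umask))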
{
"BriefDescription": "Cache Lookups : All Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1fe001",
@@ -1863,24 +2276,30 @@
},
{
"BriefDescription": "Cache Lookups : Write Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Write Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Writeback transactions to the LLC This includes all write transactions -- both Cacheable and UC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Remote non-snoop request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.PREF_OR_DMND_REMOTE_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remote non-snoop request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Non-snoop transactions to the LLC from remote agent",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd9ff",
@@ -1888,8 +2307,10 @@
},
{
"BriefDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_LOC_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x9d9ff",
@@ -1897,8 +2318,10 @@
},
{
"BriefDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_REM_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x11d9ff",
@@ -1906,8 +2329,10 @@
},
{
"BriefDescription": "Cache Lookups : Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd901",
@@ -1915,8 +2340,10 @@
},
{
"BriefDescription": "Cache Lookups : Locally HOMed Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_LOC_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Locally HOMed Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0xbd901",
@@ -1924,8 +2351,10 @@
},
{
"BriefDescription": "Cache Lookups : Remotely HOMed Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_REM_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remotely HOMed Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x13d901",
@@ -1933,8 +2362,10 @@
},
{
"BriefDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_OR_SNOOP_REMOTE_MISS_REM_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x161901",
@@ -1942,8 +2373,10 @@
},
{
"BriefDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_REMOTE_LOC_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0xa19ff",
@@ -1951,8 +2384,10 @@
},
{
"BriefDescription": "Cache Lookups : Reads that Hit the Snoop Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_SF_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Reads that Hit the Snoop Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd90e",
@@ -1960,33 +2395,41 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REM_HOM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15dfff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Transactions homed remotely Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed remotely Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Remote snoop request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remote snoop request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Snoop transactions to the LLC from remote agent",
"Unit": "CHA"
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Snoop Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x1c19ff",
@@ -1994,8 +2437,10 @@
},
{
"BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REM_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
"UMask": "0x15dfff",
@@ -2003,8 +2448,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1bc8ff",
@@ -2012,16 +2459,20 @@
},
{
"BriefDescription": "Cache Lookups : RFO Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : RFO Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x19c8ff",
@@ -2029,8 +2480,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bc801",
@@ -2038,17 +2491,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x888ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : RFO Requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1a08ff",
@@ -2056,8 +2513,10 @@
},
{
"BriefDescription": "Cache Lookups : S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Shared State",
"UMask": "0x10",
@@ -2065,8 +2524,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit Exclusive State",
"UMask": "0x4",
@@ -2074,8 +2535,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - H State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit HitMe State",
"UMask": "0x8",
@@ -2083,8 +2546,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit Shared State",
"UMask": "0x2",
@@ -2092,8 +2557,10 @@
},
{
"BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
"UMask": "0x1a42ff",
@@ -2101,24 +2568,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x842ff",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x17c2ff",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized : All Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.ALL",
"PerPkg": "1",
@@ -2128,8 +2600,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in E state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2",
@@ -2137,8 +2611,10 @@
},
{
"BriefDescription": "Lines Victimized : Local - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - All Lines : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x200f",
@@ -2146,8 +2622,10 @@
},
{
"BriefDescription": "Lines Victimized : Local - Lines in E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in E State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2002",
@@ -2155,8 +2633,10 @@
},
{
"BriefDescription": "Lines Victimized : Local - Lines in M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in M State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2001",
@@ -2164,16 +2644,20 @@
},
{
"BriefDescription": "Lines Victimized : Local Only",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized : Local - Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2004",
@@ -2181,8 +2665,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in M state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
@@ -2190,8 +2676,10 @@
},
{
"BriefDescription": "Lines Victimized : Remote - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote - All Lines : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x800f",
@@ -2199,8 +2687,10 @@
},
{
"BriefDescription": "Lines Victimized : Remote - Lines in E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote - Lines in E State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8002",
@@ -2208,8 +2698,10 @@
},
{
"BriefDescription": "Lines Victimized : Remote - Lines in M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote - Lines in M State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8001",
@@ -2217,16 +2709,20 @@
},
{
"BriefDescription": "Lines Victimized : Remote Only",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized : Remote - Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote - Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8004",
@@ -2234,8 +2730,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x4",
@@ -2243,8 +2741,10 @@
},
{
"BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : CV0 Prefetch Miss : Miscellaneous events in the Cbo.",
"UMask": "0x20",
@@ -2252,8 +2752,10 @@
},
{
"BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : CV0 Prefetch Victim : Miscellaneous events in the Cbo.",
"UMask": "0x10",
@@ -2261,8 +2763,10 @@
},
{
"BriefDescription": "Number of times that an RFO hit in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
"UMask": "0x8",
@@ -2270,8 +2774,10 @@
},
{
"BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : Silent Snoop Eviction : Miscellaneous events in the Cbo. : Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
"UMask": "0x1",
@@ -2279,8 +2785,10 @@
},
{
"BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : Write Combining Aliasing : Miscellaneous events in the Cbo. : Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
"UMask": "0x2",
@@ -2288,24 +2796,30 @@
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "OSB Snoop Broadcast : Local InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x1",
@@ -2313,8 +2827,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Local Rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x2",
@@ -2322,8 +2838,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Off",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x20",
@@ -2331,8 +2849,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Remote Rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x4",
@@ -2340,8 +2860,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Remote Rd InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.REMOTE_READINVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Remote Rd InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x8",
@@ -2349,8 +2871,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x10",
@@ -2358,48 +2882,60 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ADEGRCREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.AKEGRCREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ALLRSFWAYS_RES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.BLEGRCREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.FSF_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLOWSNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x4",
@@ -2407,8 +2943,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLWAYRSV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x10",
@@ -2416,8 +2954,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_PAMATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x2",
@@ -2425,8 +2965,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_WAYMATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x8",
@@ -2434,32 +2976,40 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.HACREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IDX_INPIPE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IPQ_SETMATCH_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IRQ_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x20",
@@ -2467,80 +3017,100 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IRQ_SETMATCH_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ISMQ_SETMATCH_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IVEGRCREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.LLC_WAYS_RES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.NOTALLOWSNOOP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ONE_FSF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ONE_RSP_CON",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.PMM_MEMMODE_TORMATCH_MULTI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.PMM_MEMMODE_TOR_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.PRQ_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x40",
@@ -2548,8 +3118,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.PTL_INPIPE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x80",
@@ -2557,8 +3129,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.RMW_SETMATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x1",
@@ -2566,128 +3140,130 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.RRQ_SETMATCH_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.SETMATCHENTRYWSCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.SF_WAYS_RES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.TOPA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.TORID_MATCH_GO_P",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.VN_AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.VN_AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
- "EventCode": "0x42",
- "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCB",
- "PerPkg": "1",
- "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Pipe Rejects",
- "EventCode": "0x42",
- "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCS",
- "PerPkg": "1",
- "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.VN_BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
- "EventCode": "0x42",
- "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_WB",
- "PerPkg": "1",
- "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.WAY_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC : NM evictions due to another read to the same near memory set in the LLC.",
"UMask": "0x2",
@@ -2695,8 +3271,10 @@
},
{
"BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC : NM evictions due to another read to the same near memory set in the SF.",
"UMask": "0x1",
@@ -2704,8 +3282,10 @@
},
{
"BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in TOR",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in TOR : No Reject in the CHA due to a pending read to the same near memory set in the TOR.",
"UMask": "0x4",
@@ -2713,88 +3293,110 @@
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": count # of FAST TOR Request inserted to ha_tor_req_fifo",
"UMask": "0x2",
@@ -2802,8 +3404,10 @@
},
{
"BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": count # of SLOW TOR Request inserted to ha_pmm_tor_req_fifo",
"UMask": "0x1",
@@ -2811,8 +3415,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC0 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -2820,8 +3426,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC1 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -2829,40 +3437,50 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC10",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC10 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 10 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC11",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC11",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC11 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 11 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC12",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC12",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC12 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 12 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC13",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC13",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC13 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 13 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC2 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -2870,8 +3488,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC3 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -2879,8 +3499,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC4 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -2888,8 +3510,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC5 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -2897,8 +3521,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC6",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC6 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 6 only.",
"UMask": "0x40",
@@ -2906,8 +3532,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC7",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC7 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 7 only.",
"UMask": "0x80",
@@ -2915,24 +3543,30 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC8",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC8 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 8 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC9",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC9 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 9 only.",
"Unit": "CHA"
},
{
"BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and remote INVITOE requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
"UMask": "0x30",
@@ -2940,6 +3574,7 @@
},
{
"BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2949,6 +3584,7 @@
},
{
"BriefDescription": "Remote INVITOE requests (exclusive ownership of a cache line without receiving data) sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -2958,6 +3594,7 @@
},
{
"BriefDescription": "Local read requests that miss the SF/LLC and remote read requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS",
"PerPkg": "1",
@@ -2967,6 +3604,7 @@
},
{
"BriefDescription": "Local read requests that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -2976,6 +3614,7 @@
},
{
"BriefDescription": "Remote read requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -2985,6 +3624,7 @@
},
{
"BriefDescription": "Local write requests that miss the SF/LLC and remote write requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES",
"PerPkg": "1",
@@ -2994,6 +3634,7 @@
},
{
"BriefDescription": "Local write requests that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -3003,6 +3644,7 @@
},
{
"BriefDescription": "Remote write requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -3012,8 +3654,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -3021,8 +3665,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -3030,8 +3676,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -3039,8 +3687,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -3048,8 +3698,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -3057,8 +3709,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -3066,8 +3720,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x10",
@@ -3075,8 +3731,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -3084,8 +3742,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -3093,95 +3753,119 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "UNC_CHA_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IPQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x4",
@@ -3189,8 +3873,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x1",
@@ -3198,8 +3884,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IRQ Rejected : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x2",
@@ -3207,8 +3895,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x10",
@@ -3216,8 +3906,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x20",
@@ -3225,8 +3917,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : RRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : RRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x40",
@@ -3234,8 +3928,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : WBQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : WBQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x80",
@@ -3243,8 +3939,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -3252,8 +3950,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -3261,8 +3961,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -3270,8 +3972,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -3279,8 +3983,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -3288,8 +3994,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -3297,8 +4005,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -3306,8 +4016,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -3315,16 +4027,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IPQ0 Reject counter was true",
"UMask": "0x1",
@@ -3332,16 +4048,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -3349,16 +4069,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -3366,8 +4090,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -3375,16 +4101,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -3392,8 +4122,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -3401,8 +4133,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -3410,8 +4144,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -3419,8 +4155,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -3428,8 +4166,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -3437,8 +4177,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -3446,8 +4188,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -3455,16 +4199,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IRQ0 Reject counter was true",
"UMask": "0x1",
@@ -3472,16 +4220,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -3489,24 +4241,30 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -3514,16 +4272,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -3531,8 +4293,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -3540,8 +4304,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
"UMask": "0x40",
@@ -3549,8 +4315,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -3558,8 +4326,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -3567,8 +4337,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -3576,8 +4348,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -3585,8 +4359,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
"UMask": "0x80",
@@ -3594,8 +4370,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -3603,8 +4381,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -3612,8 +4392,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
"UMask": "0x40",
@@ -3621,8 +4403,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -3630,8 +4414,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -3639,8 +4425,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -3648,8 +4436,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -3657,8 +4447,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
"UMask": "0x80",
@@ -3666,8 +4458,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
"UMask": "0x1",
@@ -3675,8 +4469,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -3684,8 +4480,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
"UMask": "0x1",
@@ -3693,8 +4491,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -3702,8 +4502,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : IPQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x4",
@@ -3711,8 +4513,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : IRQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x1",
@@ -3720,8 +4524,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : RRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : RRQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x40",
@@ -3729,8 +4535,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : WBQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : WBQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x80",
@@ -3738,8 +4546,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : AD REQ on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -3747,8 +4557,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : AD RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -3756,8 +4568,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : Non UPI AK Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject AK ring message",
"UMask": "0x40",
@@ -3765,8 +4579,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL NCB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -3774,8 +4590,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL NCS on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -3783,8 +4601,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -3792,8 +4612,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL WB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -3801,8 +4623,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : Non UPI IV Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject IV ring message",
"UMask": "0x80",
@@ -3810,8 +4634,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : Allow Snoop : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x40",
@@ -3819,8 +4645,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : ANY0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Any condition listed in the Other0 Reject counter was true",
"UMask": "0x1",
@@ -3828,8 +4656,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : HA : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x2",
@@ -3837,8 +4667,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : LLC OR SF Way : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -3846,8 +4678,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : LLC Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x4",
@@ -3855,8 +4689,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : PhyAddr Match : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -3864,8 +4700,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : SF Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -3873,8 +4711,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x10",
@@ -3882,8 +4722,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -3891,8 +4733,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -3900,8 +4744,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -3909,8 +4755,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -3918,8 +4766,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -3927,8 +4777,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -3936,8 +4788,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -3945,8 +4799,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -3954,16 +4810,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the PRQ0 Reject counter was true",
"UMask": "0x1",
@@ -3971,16 +4831,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -3988,16 +4852,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -4005,8 +4873,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -4014,16 +4884,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : AD REQ on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -4031,8 +4905,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : AD RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -4040,8 +4916,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : Non UPI AK Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject AK ring message",
"UMask": "0x40",
@@ -4049,8 +4927,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL NCB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -4058,8 +4938,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL NCS on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -4067,8 +4949,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -4076,8 +4960,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL WB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -4085,8 +4971,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : Non UPI IV Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject IV ring message",
"UMask": "0x80",
@@ -4094,8 +4982,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : Allow Snoop : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x40",
@@ -4103,8 +4993,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : ANY0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Any condition listed in the WBQ0 Reject counter was true",
"UMask": "0x1",
@@ -4112,8 +5004,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : HA : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x2",
@@ -4121,8 +5015,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : LLC OR SF Way : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -4130,8 +5026,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : LLC Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x4",
@@ -4139,8 +5037,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : PhyAddr Match : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -4148,8 +5048,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : SF Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -4157,8 +5059,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x10",
@@ -4166,8 +5070,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -4175,8 +5081,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -4184,8 +5092,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject AK ring message",
"UMask": "0x40",
@@ -4193,8 +5103,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -4202,8 +5114,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -4211,8 +5125,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -4220,8 +5136,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -4229,8 +5147,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject IV ring message",
"UMask": "0x80",
@@ -4238,8 +5158,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x40",
@@ -4247,8 +5169,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Any condition listed in the RRQ0 Reject counter was true",
"UMask": "0x1",
@@ -4256,8 +5180,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : HA : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x2",
@@ -4265,8 +5191,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -4274,8 +5202,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x4",
@@ -4283,8 +5213,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -4292,8 +5224,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -4301,8 +5235,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x10",
@@ -4310,8 +5246,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -4319,8 +5257,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -4328,8 +5268,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject AK ring message",
"UMask": "0x40",
@@ -4337,8 +5279,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -4346,8 +5290,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -4355,8 +5301,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -4364,8 +5312,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -4373,8 +5323,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject IV ring message",
"UMask": "0x80",
@@ -4382,8 +5334,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x40",
@@ -4391,8 +5345,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Any condition listed in the WBQ0 Reject counter was true",
"UMask": "0x1",
@@ -4400,8 +5356,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : HA : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x2",
@@ -4409,8 +5367,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -4418,8 +5378,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x4",
@@ -4427,8 +5389,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -4436,8 +5400,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -4445,8 +5411,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x10",
@@ -4454,8 +5422,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4463,8 +5433,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -4472,8 +5444,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -4481,8 +5455,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4490,8 +5466,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -4499,8 +5477,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -4508,8 +5488,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4517,8 +5499,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -4526,8 +5510,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -4535,8 +5521,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -4544,8 +5532,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x80",
@@ -4553,8 +5543,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4562,8 +5554,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -4571,8 +5565,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -4580,8 +5576,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -4589,8 +5587,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4598,8 +5598,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -4607,8 +5609,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -4616,8 +5620,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -4625,8 +5631,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4634,8 +5642,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -4643,8 +5653,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -4652,8 +5664,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -4661,8 +5675,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -4670,16 +5686,20 @@
},
{
"BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_CHA_RxR_CRD_STARVED_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"Unit": "CHA"
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4687,8 +5707,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -4696,8 +5718,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -4705,8 +5729,10 @@
},
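Note (editorial, not part of the patch): for these CMS ingress events the "All" umasks are simply the OR of the credited and uncredited bits, as the descriptions themselves state ("All == Credited + Uncredited"). For the UNC_CHA_RxR_INSERTS.AD_*/BL_* entries above that means 0x11 == 0x10 | 0x01 and 0x44 == 0x40 | 0x04. A purely illustrative check:

    # UMask values copied from the UNC_CHA_RxR_INSERTS entries in this hunk.
    AD_CRD, AD_UNCRD, AD_ALL = 0x10, 0x01, 0x11
    BL_CRD, BL_UNCRD, BL_ALL = 0x40, 0x04, 0x44
    assert AD_ALL == AD_CRD | AD_UNCRD   # "All == Credited + Uncredited"
    assert BL_ALL == BL_CRD | BL_UNCRD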
{
"BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -4714,8 +5740,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -4723,8 +5751,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4732,8 +5762,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -4741,8 +5773,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -4750,8 +5784,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -4759,8 +5795,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4768,8 +5806,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -4777,8 +5817,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -4786,8 +5828,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -4795,8 +5839,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -4804,8 +5850,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4813,8 +5861,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x20",
@@ -4822,8 +5872,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -4831,8 +5883,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -4840,6 +5894,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.E_STATE",
"PerPkg": "1",
@@ -4849,6 +5904,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.M_STATE",
"PerPkg": "1",
@@ -4858,6 +5914,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.S_STATE",
"PerPkg": "1",
@@ -4867,8 +5924,10 @@
},
{
"BriefDescription": "Snoops Sent : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : All : Counts the number of snoops issued by the HA.",
"UMask": "0x1",
@@ -4876,8 +5935,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast snoops for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA responding to local requests",
"UMask": "0x10",
@@ -4885,8 +5946,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA responding to remote requests",
"UMask": "0x20",
@@ -4894,8 +5957,10 @@
},
{
"BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Directed snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA responding to local requests",
"UMask": "0x40",
@@ -4903,8 +5968,10 @@
},
{
"BriefDescription": "Snoops Sent : Directed snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Directed snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA responding to remote requests",
"UMask": "0x80",
@@ -4912,8 +5979,10 @@
},
{
"BriefDescription": "Snoops Sent : Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Snoops sent for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA responding to local requests",
"UMask": "0x4",
@@ -4921,8 +5990,10 @@
},
{
"BriefDescription": "Snoops Sent : Snoops sent for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Snoops sent for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA responding to remote requests",
"UMask": "0x8",
@@ -4930,8 +6001,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RSPCNFLCT*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : RSPCNFLCT* : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -4939,8 +6012,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : RspFwd : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -4948,8 +6023,10 @@
},
{
"BriefDescription": "Snoop Responses Received : Rsp*Fwd*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPFWDWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : Rsp*Fwd*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -4957,8 +6034,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
"UMask": "0x1",
@@ -4966,8 +6045,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
"UMask": "0x4",
@@ -4975,8 +6056,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a transaction with the opcode type RspS Snoop Response was received which indicates when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -4984,8 +6067,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
"UMask": "0x8",
@@ -4993,8 +6078,10 @@
},
{
"BriefDescription": "Snoop Responses Received : Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : Rsp*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -5002,8 +6089,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspCnflct : Number of snoop responses received for a Local request : Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -5011,8 +6100,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -5020,8 +6111,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : Rsp*FWD*WB : Number of snoop responses received for a Local request : Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -5029,8 +6122,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspI : Number of snoop responses received for a Local request : Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
"UMask": "0x1",
@@ -5038,8 +6133,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspIFwd : Number of snoop responses received for a Local request : Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
@@ -5047,8 +6144,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspS : Number of snoop responses received for a Local request : Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -5056,8 +6155,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspSFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its currently copy. This is common for data and code reads that hit in a remote socket in E or F state.",
"UMask": "0x8",
@@ -5065,8 +6166,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : Rsp*WB : Number of snoop responses received for a Local request : Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -5074,56 +6177,70 @@
},
{
"BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
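Note (editorial, not part of the patch): the change repeated throughout this file is the same for every event object: a "Counter": "0,1,2,3" field naming the general-purpose counters the event can be programmed on, plus an "Experimental": "1" marker on most events indicating the count is less thoroughly validated. As a hedged illustration only, with a hypothetical path and the assumption that each pmu-events file is a flat JSON array of event objects, a minimal Python sketch that loads one of these files and skips experimental events:

    import json

    # Hypothetical path; substitute the per-platform uncore-cache.json touched by this diff.
    path = "tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json"

    with open(path) as f:
        events = json.load(f)  # flat JSON array of event objects

    # Keep only events not flagged "Experimental": "1".
    stable = [e for e in events if e.get("Experimental") != "1"]
    for e in stable:
        print(e["EventName"], "counters:", e.get("Counter", "n/a"))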
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -5131,8 +6248,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -5140,8 +6259,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -5149,8 +6270,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -5158,8 +6281,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -5167,8 +6292,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -5176,8 +6303,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -5185,8 +6314,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -5194,8 +6325,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -5203,8 +6336,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -5212,8 +6347,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -5221,8 +6358,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -5230,8 +6369,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -5239,8 +6380,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -5248,8 +6391,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -5257,8 +6402,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -5266,8 +6413,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -5275,8 +6424,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -5284,8 +6435,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -5293,8 +6446,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -5302,8 +6457,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -5311,8 +6468,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -5320,8 +6479,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -5329,8 +6490,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -5338,8 +6501,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -5347,8 +6512,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -5356,8 +6523,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -5365,8 +6534,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -5374,8 +6545,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -5383,8 +6556,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -5392,8 +6567,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -5401,8 +6578,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -5410,8 +6589,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -5419,8 +6600,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -5428,8 +6611,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -5437,8 +6622,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -5446,8 +6633,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -5455,8 +6644,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -5464,8 +6655,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -5473,8 +6666,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -5482,8 +6677,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -5491,8 +6688,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -5500,8 +6699,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -5509,8 +6710,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -5518,8 +6721,10 @@
},
{
"BriefDescription": "TOR Inserts : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc001ffff",
@@ -5527,24 +6732,30 @@
},
{
"BriefDescription": "TOR Inserts : DDR4 Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DDR4 Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.DDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.DDR4",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : SF/LLC Evictions : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -5552,14 +6763,17 @@
},
{
"BriefDescription": "TOR Inserts : Just Hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Hits : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All requests from iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA",
"PerPkg": "1",
@@ -5569,6 +6783,7 @@
},
{
"BriefDescription": "TOR Inserts : CLFlushes issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
"PerPkg": "1",
@@ -5578,8 +6793,10 @@
},
{
"BriefDescription": "TOR Inserts : CLFlushOpts issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CLFlushOpts issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8d7ff01",
@@ -5587,6 +6804,7 @@
},
{
"BriefDescription": "TOR Inserts : CRDs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
"PerPkg": "1",
@@ -5596,8 +6814,10 @@
},
{
"BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88fff01",
@@ -5605,8 +6825,10 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRds issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc817ff01",
@@ -5614,8 +6836,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837ff01",
@@ -5623,8 +6847,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc827ff01",
@@ -5632,8 +6858,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8a7ff01",
@@ -5641,6 +6869,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
"PerPkg": "1",
@@ -5650,6 +6879,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
"PerPkg": "1",
@@ -5659,6 +6889,7 @@
},
{
"BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
"PerPkg": "1",
@@ -5668,6 +6899,7 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -5677,6 +6909,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
"PerPkg": "1",
@@ -5686,8 +6919,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to page walks that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fd01",
@@ -5695,8 +6930,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc827fd01",
@@ -5704,8 +6941,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8a7fd01",
@@ -5713,6 +6952,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
"PerPkg": "1",
@@ -5722,8 +6962,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fd01",
@@ -5731,8 +6973,10 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcccffd01",
@@ -5740,17 +6984,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xcccffd01",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xccd7fd01",
@@ -5758,15 +7006,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xccd7fd01",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -5776,6 +7027,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
"PerPkg": "1",
@@ -5785,6 +7037,7 @@
},
{
"BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -5794,8 +7047,10 @@
},
{
"BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_SPECITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc57fd01",
@@ -5803,8 +7058,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : ItoMs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47ff01",
@@ -5812,8 +7069,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd47ff01",
@@ -5821,8 +7080,10 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcccfff01",
@@ -5830,6 +7091,7 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -5839,6 +7101,7 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -5848,6 +7111,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
"PerPkg": "1",
@@ -5857,6 +7121,7 @@
},
{
"BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
"PerPkg": "1",
@@ -5866,8 +7131,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80efe01",
@@ -5875,6 +7142,7 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
"PerPkg": "1",
@@ -5884,8 +7152,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88efe01",
@@ -5893,8 +7163,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88f7e01",
@@ -5902,8 +7174,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80f7e01",
@@ -5911,6 +7185,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
"PerPkg": "1",
@@ -5920,8 +7195,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fe01",
@@ -5929,6 +7206,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
"PerPkg": "1",
@@ -5938,6 +7216,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
"PerPkg": "1",
@@ -5947,6 +7226,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
"PerPkg": "1",
@@ -5956,6 +7236,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
"PerPkg": "1",
@@ -5965,8 +7246,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc827fe01",
@@ -5974,8 +7257,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8a7fe01",
@@ -5983,6 +7268,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
"PerPkg": "1",
@@ -5992,6 +7278,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
"PerPkg": "1",
@@ -6001,8 +7288,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978601",
@@ -6010,6 +7299,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
"PerPkg": "1",
@@ -6019,8 +7309,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968601",
@@ -6028,8 +7320,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968a01",
@@ -6037,8 +7331,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978a01",
@@ -6046,6 +7342,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
"PerPkg": "1",
@@ -6055,8 +7352,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970601",
@@ -6064,8 +7363,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970a01",
@@ -6073,6 +7374,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
"PerPkg": "1",
@@ -6082,6 +7384,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
"PerPkg": "1",
@@ -6091,6 +7394,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
"PerPkg": "1",
@@ -6100,6 +7404,7 @@
},
{
"BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR",
"PerPkg": "1",
@@ -6109,8 +7414,10 @@
},
{
"BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8678601",
@@ -6118,17 +7425,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_DRAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8678601",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8668601",
@@ -6136,17 +7447,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_DRAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8668601",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8668a01",
@@ -6154,8 +7469,10 @@
},
{
"BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8678a01",
@@ -6163,8 +7480,10 @@
},
{
"BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8670601",
@@ -6172,17 +7491,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_REMOTE_DRAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8670601",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8670a01",
@@ -6190,8 +7513,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fe01",
@@ -6199,8 +7524,10 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcccffe01",
@@ -6208,6 +7535,7 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -6217,6 +7545,7 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -6226,8 +7555,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668601",
@@ -6235,8 +7566,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668a01",
@@ -6244,8 +7577,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8601",
@@ -6253,8 +7588,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8a01",
@@ -6262,6 +7599,7 @@
},
{
"BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR",
"PerPkg": "1",
@@ -6271,8 +7609,10 @@
},
{
"BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86f8601",
@@ -6280,17 +7620,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_DRAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc86f8601",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86e8601",
@@ -6298,17 +7642,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DRAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc86e8601",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86e8a01",
@@ -6316,8 +7664,10 @@
},
{
"BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86f8a01",
@@ -6325,8 +7675,10 @@
},
{
"BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86f0601",
@@ -6334,17 +7686,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_DRAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc86f0601",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86f0a01",
@@ -6352,8 +7708,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670601",
@@ -6361,8 +7719,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remote memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670a01",
@@ -6370,8 +7730,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0601",
@@ -6379,8 +7741,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0a01",
@@ -6388,6 +7752,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
"PerPkg": "1",
@@ -6397,6 +7762,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -6406,6 +7772,7 @@
},
{
"BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -6415,6 +7782,7 @@
},
{
"BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -6424,6 +7792,7 @@
},
{
"BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
"PerPkg": "1",
@@ -6433,6 +7802,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
"PerPkg": "1",
@@ -6442,8 +7812,10 @@
},
{
"BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_SPECITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc57fe01",
@@ -6451,8 +7823,10 @@
},
{
"BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc877de01",
@@ -6460,8 +7834,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86ffe01",
@@ -6469,8 +7845,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867fe01",
@@ -6478,8 +7856,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678601",
@@ -6487,8 +7867,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678a01",
@@ -6496,8 +7878,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8601",
@@ -6505,8 +7889,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8a01",
@@ -6514,8 +7900,10 @@
},
{
"BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc87fde01",
@@ -6523,6 +7911,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
"PerPkg": "1",
@@ -6532,6 +7921,7 @@
},
{
"BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
"PerPkg": "1",
@@ -6541,6 +7931,7 @@
},
{
"BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
"PerPkg": "1",
@@ -6550,8 +7941,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc3fff01",
@@ -6559,8 +7952,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoIs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc37ff01",
@@ -6568,8 +7963,10 @@
},
{
"BriefDescription": "TOR Inserts : WBMtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbMtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc2fff01",
@@ -6577,8 +7974,10 @@
},
{
"BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbMtoIs issued by iA Cores . (Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc27ff01",
@@ -6586,8 +7985,10 @@
},
{
"BriefDescription": "TOR Inserts : WBStoIs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbStoIs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc67ff01",
@@ -6595,8 +7996,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86fff01",
@@ -6604,8 +8007,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLF issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867ff01",
@@ -6613,6 +8018,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO",
"PerPkg": "1",
@@ -6622,8 +8028,10 @@
},
{
"BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CLFlushes issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8c3ff04",
@@ -6631,6 +8039,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
"PerPkg": "1",
@@ -6640,6 +8049,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
"PerPkg": "1",
@@ -6649,6 +8059,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -6658,6 +8069,7 @@
},
{
"BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -6667,8 +8079,10 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fd04",
@@ -6676,6 +8090,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
"PerPkg": "1",
@@ -6685,6 +8100,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
"PerPkg": "1",
@@ -6694,6 +8110,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices to locally HOMed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL",
"PerPkg": "1",
@@ -6703,6 +8120,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices to remotely HOMed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE",
"PerPkg": "1",
@@ -6712,6 +8130,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices to locally HOMed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL",
"PerPkg": "1",
@@ -6721,6 +8140,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices to remotely HOMed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
"PerPkg": "1",
@@ -6730,6 +8150,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
"PerPkg": "1",
@@ -6739,6 +8160,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
"PerPkg": "1",
@@ -6748,6 +8170,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -6757,6 +8180,7 @@
},
{
"BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -6766,8 +8190,10 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fe04",
@@ -6775,6 +8201,7 @@
},
{
"BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
"PerPkg": "1",
@@ -6784,6 +8211,7 @@
},
{
"BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL",
"PerPkg": "1",
@@ -6793,6 +8221,7 @@
},
{
"BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE",
"PerPkg": "1",
@@ -6802,8 +8231,10 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803ff04",
@@ -6811,8 +8242,10 @@
},
{
"BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WbMtoIs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc23ff04",
@@ -6820,8 +8253,10 @@
},
{
"BriefDescription": "TOR Inserts : IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IPQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x8",
@@ -6829,8 +8264,10 @@
},
{
"BriefDescription": "TOR Inserts : IRQ - iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IRQ - iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From an iA Core",
"UMask": "0x1",
@@ -6838,8 +8275,10 @@
},
{
"BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IRQ - Non iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x10",
@@ -6847,24 +8286,30 @@
},
{
"BriefDescription": "TOR Inserts : Just ISOC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just ISOC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just Local Targets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Local Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA and IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests",
"UMask": "0xc000ff05",
@@ -6872,8 +8317,10 @@
},
{
"BriefDescription": "TOR Inserts : All from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests from iA Cores",
"UMask": "0xc000ff01",
@@ -6881,8 +8328,10 @@
},
{
"BriefDescription": "TOR Inserts : All from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally generated IO traffic",
"UMask": "0xc000ff04",
@@ -6890,72 +8339,90 @@
},
{
"BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Misses : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : MMCFG Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : MMCFG Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NearMem",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NonCoherent",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NonCoherent : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NotNearMem",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NotNearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : PMM Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PMM Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PRQ - IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From a PCIe Device",
"UMask": "0x4",
@@ -6963,8 +8430,10 @@
},
{
"BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PRQ - Non IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x20",
@@ -6972,16 +8441,20 @@
},
{
"BriefDescription": "TOR Inserts : Just Remote Targets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REMOTE_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Remote Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : RRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RRQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x40",
@@ -6989,8 +8462,10 @@
},
{
"BriefDescription": "TOR Inserts : WBQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WBQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x80",
@@ -6998,16 +8473,20 @@
},
{
"BriefDescription": "TOR Occupancy : DDR4 Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DDR4 Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : SF/LLC Evictions : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -7015,14 +8494,17 @@
},
{
"BriefDescription": "TOR Occupancy : Just Hits",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Hits : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All requests from iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
"PerPkg": "1",
@@ -7032,8 +8514,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8c7ff01",
@@ -7041,8 +8525,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8d7ff01",
@@ -7050,6 +8536,7 @@
},
{
"BriefDescription": "TOR Occupancy : CRDs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
"PerPkg": "1",
@@ -7059,8 +8546,10 @@
},
{
"BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88fff01",
@@ -7068,6 +8557,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
"PerPkg": "1",
@@ -7077,8 +8567,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837ff01",
@@ -7086,8 +8578,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc827ff01",
@@ -7095,8 +8589,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8a7ff01",
@@ -7104,8 +8600,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc897ff01",
@@ -7113,6 +8611,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
"PerPkg": "1",
@@ -7122,8 +8621,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80ffd01",
@@ -7131,8 +8632,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88ffd01",
@@ -7140,8 +8643,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc817fd01",
@@ -7149,8 +8654,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fd01",
@@ -7158,8 +8665,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc827fd01",
@@ -7167,8 +8676,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8a7fd01",
@@ -7176,8 +8687,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc897fd01",
@@ -7185,8 +8698,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fd01",
@@ -7194,8 +8709,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcccffd01",
@@ -7203,8 +8720,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xccd7fd01",
@@ -7212,8 +8731,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xccc7fd01",
@@ -7221,8 +8742,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc807fd01",
@@ -7230,8 +8753,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc887fd01",
@@ -7239,8 +8764,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47ff01",
@@ -7248,8 +8775,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd47ff01",
@@ -7257,8 +8786,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcccfff01",
@@ -7266,8 +8797,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xccd7ff01",
@@ -7275,8 +8808,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xccc7ff01",
@@ -7284,6 +8819,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
"PerPkg": "1",
@@ -7293,6 +8829,7 @@
},
{
"BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
"PerPkg": "1",
@@ -7302,8 +8839,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80efe01",
@@ -7311,8 +8850,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88ffe01",
@@ -7320,8 +8861,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88efe01",
@@ -7329,8 +8872,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88f7e01",
@@ -7338,8 +8883,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80f7e01",
@@ -7347,6 +8894,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
"PerPkg": "1",
@@ -7356,8 +8904,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fe01",
@@ -7365,6 +8915,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
"PerPkg": "1",
@@ -7374,6 +8925,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
"PerPkg": "1",
@@ -7383,8 +8935,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8168601",
@@ -7392,8 +8946,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8168a01",
@@ -7401,8 +8957,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc827fe01",
@@ -7410,8 +8968,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8a7fe01",
@@ -7419,6 +8979,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
"PerPkg": "1",
@@ -7428,8 +8989,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc897fe01",
@@ -7437,8 +9000,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978601",
@@ -7446,8 +9011,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc896fe01",
@@ -7455,8 +9022,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968601",
@@ -7464,8 +9033,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968a01",
@@ -7473,8 +9044,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978a01",
@@ -7482,8 +9055,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8977e01",
@@ -7491,8 +9066,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970601",
@@ -7500,8 +9077,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970a01",
@@ -7509,6 +9088,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
"PerPkg": "1",
@@ -7518,8 +9098,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8170601",
@@ -7527,8 +9109,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8170a01",
@@ -7536,8 +9120,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc867fe01",
@@ -7545,8 +9131,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8678601",
@@ -7554,8 +9142,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8668601",
@@ -7563,8 +9153,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8668a01",
@@ -7572,8 +9164,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8678a01",
@@ -7581,8 +9175,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8670601",
@@ -7590,8 +9186,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc8670a01",
@@ -7599,8 +9197,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fe01",
@@ -7608,8 +9208,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcccffe01",
@@ -7617,8 +9219,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xccd7fe01",
@@ -7626,8 +9230,10 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xccc7fe01",
@@ -7635,8 +9241,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668601",
@@ -7644,8 +9252,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668a01",
@@ -7653,8 +9263,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8601",
@@ -7662,8 +9274,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8a01",
@@ -7671,8 +9285,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86ffe01",
@@ -7680,8 +9296,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86f8601",
@@ -7689,8 +9307,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86e8601",
@@ -7698,8 +9318,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86e8a01",
@@ -7707,8 +9329,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86f8a01",
@@ -7716,8 +9340,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86f0601",
@@ -7725,8 +9351,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86f0a01",
@@ -7734,8 +9362,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670601",
@@ -7743,8 +9373,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670a01",
@@ -7752,8 +9384,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0601",
@@ -7761,8 +9395,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0a01",
@@ -7770,6 +9406,7 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
"PerPkg": "1",
@@ -7779,8 +9416,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc806fe01",
@@ -7788,8 +9427,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc887fe01",
@@ -7797,8 +9438,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc886fe01",
@@ -7806,8 +9449,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8877e01",
@@ -7815,8 +9460,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8077e01",
@@ -7824,8 +9471,10 @@
},
{
"BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_SPECITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores that missed the LLC: For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc57fe01",
@@ -7833,8 +9482,10 @@
},
{
"BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc877de01",
@@ -7842,8 +9493,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86ffe01",
@@ -7851,8 +9504,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867fe01",
@@ -7860,8 +9515,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678601",
@@ -7869,8 +9526,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678a01",
@@ -7878,8 +9537,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8601",
@@ -7887,8 +9548,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8a01",
@@ -7896,8 +9559,10 @@
},
{
"BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc87fde01",
@@ -7905,6 +9570,7 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
"PerPkg": "1",
@@ -7914,8 +9580,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc887ff01",
@@ -7923,8 +9591,10 @@
},
{
"BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc57ff01",
@@ -7932,8 +9602,10 @@
},
{
"BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc27ff01",
@@ -7941,8 +9613,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86fff01",
@@ -7950,8 +9624,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867ff01",
@@ -7959,6 +9635,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
"PerPkg": "1",
@@ -7968,8 +9645,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8c3ff04",
@@ -7977,6 +9656,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
"PerPkg": "1",
@@ -7986,8 +9666,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc43fd04",
@@ -7995,8 +9677,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd43fd04",
@@ -8004,8 +9688,10 @@
},
{
"BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8f3fd04",
@@ -8013,8 +9699,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fd04",
@@ -8022,8 +9710,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc43ff04",
@@ -8031,8 +9721,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd43ff04",
@@ -8040,6 +9732,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
"PerPkg": "1",
@@ -8049,8 +9742,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc43fe04",
@@ -8058,8 +9753,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd43fe04",
@@ -8067,6 +9764,7 @@
},
{
"BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -8076,8 +9774,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fe04",
@@ -8085,6 +9785,7 @@
},
{
"BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
"PerPkg": "1",
@@ -8094,8 +9795,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803ff04",
@@ -8103,8 +9806,10 @@
},
{
"BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc23ff04",
@@ -8112,8 +9817,10 @@
},
{
"BriefDescription": "TOR Occupancy : IPQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IPQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x8",
@@ -8121,8 +9828,10 @@
},
{
"BriefDescription": "TOR Occupancy : IRQ - iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IRQ - iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From an iA Core",
"UMask": "0x1",
@@ -8130,8 +9839,10 @@
},
{
"BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IRQ - Non iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x10",
@@ -8139,24 +9850,30 @@
},
{
"BriefDescription": "TOR Occupancy : Just ISOC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just ISOC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just Local Targets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Local Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA and IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests",
"UMask": "0xc000ff05",
@@ -8164,8 +9881,10 @@
},
{
"BriefDescription": "TOR Occupancy : All from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests from iA Cores",
"UMask": "0xc000ff01",
@@ -8173,8 +9892,10 @@
},
{
"BriefDescription": "TOR Occupancy : All from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally generated IO traffic",
"UMask": "0xc000ff04",
@@ -8182,72 +9903,90 @@
},
{
"BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just Misses",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Misses : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : MMCFG Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : MMCFG Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NearMem",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NonCoherent : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NotNearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : PMM Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PMM Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PRQ - IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From a PCIe Device",
"UMask": "0x4",
@@ -8255,8 +9994,10 @@
},
{
"BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PRQ - Non IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x20",
@@ -8264,16 +10005,20 @@
},
{
"BriefDescription": "TOR Occupancy : Just Remote Targets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REMOTE_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Remote Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8281,8 +10026,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8290,8 +10037,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8299,8 +10048,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8308,8 +10059,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8317,8 +10070,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8326,8 +10081,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8335,8 +10092,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8344,8 +10103,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8353,8 +10114,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8362,8 +10125,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x80",
@@ -8371,8 +10136,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8380,8 +10147,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8389,8 +10158,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8398,8 +10169,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -8407,8 +10180,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8416,8 +10191,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8425,8 +10202,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8434,8 +10213,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8443,8 +10224,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8452,8 +10235,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8461,8 +10246,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8470,8 +10257,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8479,8 +10268,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8488,8 +10279,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8497,8 +10290,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8506,8 +10301,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8515,8 +10312,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8524,8 +10323,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8533,8 +10334,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8542,8 +10345,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8551,8 +10356,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8560,8 +10367,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8569,8 +10378,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8578,8 +10389,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8587,8 +10400,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8596,8 +10411,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8605,8 +10422,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8614,8 +10433,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8623,8 +10444,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8632,8 +10455,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8641,8 +10466,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8650,8 +10477,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8659,8 +10488,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x10",
@@ -8668,8 +10499,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -8677,8 +10510,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -8686,8 +10521,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x80",
@@ -8695,8 +10532,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8704,8 +10543,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -8713,8 +10554,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -8722,8 +10565,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -8731,8 +10576,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8740,8 +10587,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8749,8 +10598,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8758,8 +10609,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8767,8 +10620,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8776,8 +10631,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8785,8 +10642,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8794,8 +10653,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8803,8 +10664,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8812,8 +10675,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x1",
@@ -8821,8 +10686,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -8830,8 +10697,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -8839,8 +10708,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x80",
@@ -8848,8 +10719,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x4",
@@ -8857,8 +10730,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -8866,8 +10741,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -8875,8 +10752,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8884,8 +10763,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8893,8 +10774,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8902,8 +10785,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8911,8 +10796,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8920,8 +10807,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8929,8 +10818,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8938,8 +10829,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -8947,8 +10840,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8956,8 +10851,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8965,8 +10862,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -8974,8 +10873,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8983,8 +10884,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8992,8 +10895,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9001,8 +10906,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9010,8 +10917,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9019,8 +10928,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9028,8 +10939,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9037,8 +10950,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9046,8 +10961,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9055,8 +10972,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9064,8 +10983,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9073,8 +10994,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9082,8 +11005,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9091,8 +11016,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9100,8 +11027,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9109,8 +11038,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9118,8 +11049,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9127,8 +11060,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9136,8 +11071,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9145,8 +11082,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9154,8 +11093,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9163,8 +11104,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9172,8 +11115,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9181,8 +11126,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9190,8 +11137,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9199,8 +11148,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9208,8 +11159,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9217,8 +11170,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9226,8 +11181,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9235,8 +11192,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -9244,8 +11203,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -9253,8 +11214,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -9262,8 +11225,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -9271,8 +11236,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -9280,8 +11247,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -9289,8 +11258,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -9298,8 +11269,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -9307,8 +11280,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -9316,8 +11291,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9325,8 +11302,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9334,8 +11313,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9343,8 +11324,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9352,8 +11335,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9361,8 +11346,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9370,8 +11357,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9379,8 +11368,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9388,8 +11379,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9397,8 +11390,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -9406,8 +11401,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -9415,8 +11412,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -9424,8 +11423,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -9433,8 +11434,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -9442,8 +11445,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -9451,8 +11456,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -9460,8 +11467,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -9469,8 +11478,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -9478,8 +11489,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_VERT_STARVED1.TGC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -9487,8 +11500,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9496,8 +11511,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9505,8 +11522,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9514,8 +11533,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9523,8 +11544,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9532,8 +11555,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9541,8 +11566,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9550,8 +11577,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9559,8 +11588,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9568,8 +11599,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9577,8 +11610,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9586,8 +11621,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9595,8 +11632,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9604,8 +11643,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9613,8 +11654,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9622,8 +11665,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9631,8 +11676,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -9640,8 +11687,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -9649,8 +11698,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9658,8 +11709,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9667,8 +11720,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9676,8 +11731,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9685,8 +11742,10 @@
},
{
"BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbPushMtoI : Pushed to LLC : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was able to push WbPushMToI to LLC",
"UMask": "0x1",
@@ -9694,8 +11753,10 @@
},
{
"BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbPushMtoI : Pushed to Memory : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
"UMask": "0x2",
@@ -9703,8 +11764,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC0 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -9712,8 +11775,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC1 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -9721,40 +11786,50 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC10",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC10 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 10 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC11",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC11",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC11 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 11 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC12",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC12",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC12 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 12 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC13",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC13",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC13 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 13 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC2 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -9762,8 +11837,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC3 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -9771,8 +11848,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC4 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -9780,8 +11859,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC5 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -9789,8 +11870,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC6",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC6 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 6 only.",
"UMask": "0x40",
@@ -9798,8 +11881,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC7",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC7 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 7 only.",
"UMask": "0x80",
@@ -9807,24 +11892,30 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC8",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC8 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 8 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC9",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC9 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 9 only.",
"Unit": "CHA"
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 0?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
"UMask": "0x8",
@@ -9832,8 +11923,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 0?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
"UMask": "0x4",
@@ -9841,8 +11934,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 1?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
"UMask": "0x80",
@@ -9850,8 +11945,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 1?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
"UMask": "0x40",
@@ -9859,8 +11956,10 @@
},
{
"BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Sent (on 0?) : Number of XPT prefetches sent",
"UMask": "0x1",
@@ -9868,8 +11967,10 @@
},
{
"BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Sent (on 1?) : Number of XPT prefetches sent",
"UMask": "0x10",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
index 6997e6f7d366..97bec6cfc79c 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "Total Write Cache Occupancy : Any Source",
+ "Counter": "0,1",
"EventCode": "0x0F",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total Write Cache Occupancy : Any Source : Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events. : Tracks all requests from any source port.",
"UMask": "0x1",
@@ -10,8 +12,10 @@
},
{
"BriefDescription": "Total Write Cache Occupancy : Snoops",
+ "Counter": "0,1",
"EventCode": "0x0F",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total Write Cache Occupancy : Snoops : Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
"UMask": "0x2",
@@ -19,6 +23,7 @@
},
{
"BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
+ "Counter": "0,1",
"EventCode": "0x0f",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
"PerPkg": "1",
@@ -28,6 +33,7 @@
},
{
"BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "Counter": "0,1",
"EventCode": "0x01",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
@@ -35,8 +41,10 @@
},
{
"BriefDescription": "Coherent Ops : CLFlush",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x80",
@@ -44,6 +52,7 @@
},
{
"BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.PCITOM",
"PerPkg": "1",
@@ -53,8 +62,10 @@
},
{
"BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline to coherent memory. RFO is a Read For Ownership command that requests ownership of the cacheline and moves data from the mesh to IRP cache.",
"UMask": "0x8",
@@ -62,6 +73,7 @@
},
{
"BriefDescription": "Coherent Ops : WbMtoI",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
"PerPkg": "1",
@@ -71,6 +83,7 @@
},
{
"BriefDescription": "FAF RF full",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_FAF_FULL",
"PerPkg": "1",
@@ -78,6 +91,7 @@
},
{
"BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_FAF_INSERTS",
"PerPkg": "1",
@@ -86,6 +100,7 @@
},
{
"BriefDescription": "Occupancy of the IRP FAF queue.",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_FAF_OCCUPANCY",
"PerPkg": "1",
@@ -94,6 +109,7 @@
},
{
"BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_FAF_TRANSACTIONS",
"PerPkg": "1",
@@ -101,14 +117,17 @@
},
{
"BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.EVICTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
"PerPkg": "1",
@@ -117,78 +136,97 @@
},
{
"BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.FAST_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.FAST_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.FAST_XFER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.LOST_FWD",
"PerPkg": "1",
@@ -198,8 +236,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x20",
@@ -207,8 +247,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Received Valid : Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x40",
@@ -216,8 +258,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of E Line : Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x4",
@@ -225,8 +269,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of I Line : Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x1",
@@ -234,8 +280,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of M Line : Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x8",
@@ -243,8 +291,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of S Line : Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x2",
@@ -252,88 +302,110 @@
},
{
"BriefDescription": "P2P Requests",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_P2P_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "P2P Requests : P2P requests from the ITC",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Occupancy",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_P2P_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "P2P Occupancy : P2P B & S Queue Occupancy",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : P2P completions",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : match if local only",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : match if local and target matches",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : P2P Message",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : P2P reads",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : Match if remote only",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : match if remote and target matches",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : P2P Writes",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
"UMask": "0x7e",
@@ -341,8 +413,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
"UMask": "0x74",
@@ -350,8 +424,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
"UMask": "0x72",
@@ -359,6 +435,7 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
"PerPkg": "1",
@@ -368,8 +445,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
"UMask": "0x71",
@@ -377,64 +456,80 @@
},
{
"BriefDescription": "Snoop Responses : Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Hit I",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Hit M",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Miss",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpCode",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpData",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpInv",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count : Atomic",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Inbound Transaction Count : Atomic : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks the number of atomic transactions",
"UMask": "0x10",
@@ -442,8 +537,10 @@
},
{
"BriefDescription": "Inbound Transaction Count : Other",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Inbound Transaction Count : Other : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks the number of 'other' kinds of transactions.",
"UMask": "0x20",
@@ -451,8 +548,10 @@
},
{
"BriefDescription": "Inbound Transaction Count : Writes",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
"UMask": "0x2",
@@ -460,6 +559,7 @@
},
{
"BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -469,134 +569,170 @@
},
{
"BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
"EventCode": "0x0B",
"EventName": "UNC_I_TxC_AK_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x08",
"EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x06",
"EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x09",
"EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x07",
"EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x0A",
"EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0&AD1 both. Stalls on both AD0 and AD1 will count as 2",
"Unit": "IRP"
},
{
"BriefDescription": "No AD0 Egress Credits Stalls",
+ "Counter": "0,1",
"EventCode": "0x1A",
"EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No AD0 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No AD1 Egress Credits Stalls",
+ "Counter": "0,1",
"EventCode": "0x1B",
"EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No AD1 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No BL Egress Credit Stalls : Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0x0D",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0x0E",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0x0C",
"EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Request Queue Occupancy : Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
"Unit": "IRP"
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -604,8 +740,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -613,8 +751,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -622,8 +762,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -631,8 +773,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -640,8 +784,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -649,8 +795,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -658,8 +806,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -667,8 +817,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -676,8 +828,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -685,8 +839,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -694,8 +850,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -703,8 +861,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -712,8 +872,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -721,8 +883,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -730,8 +894,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -739,8 +905,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -748,8 +916,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -757,8 +927,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -766,8 +938,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -775,8 +949,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -784,8 +960,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -793,8 +971,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -802,8 +982,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -811,8 +993,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -820,8 +1004,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -829,8 +1015,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -838,8 +1026,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -847,8 +1037,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -856,8 +1048,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -865,8 +1059,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -874,8 +1070,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -883,8 +1081,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -892,8 +1092,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -901,8 +1103,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -910,8 +1114,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -919,8 +1125,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -928,8 +1136,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -937,8 +1147,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -946,8 +1158,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -955,8 +1169,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -964,8 +1180,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -973,8 +1191,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -982,8 +1202,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -991,8 +1213,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1000,8 +1224,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1009,8 +1235,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1018,8 +1246,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -1027,8 +1257,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -1036,8 +1268,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -1045,8 +1279,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -1054,8 +1290,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -1063,8 +1301,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1072,8 +1312,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1081,8 +1323,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1090,8 +1334,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -1099,8 +1345,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -1108,8 +1356,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -1117,8 +1367,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -1126,8 +1378,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -1135,8 +1389,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -1144,8 +1400,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -1153,8 +1411,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -1162,8 +1422,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -1171,8 +1433,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -1180,8 +1444,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -1189,8 +1455,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1198,8 +1466,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1207,8 +1477,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1216,8 +1488,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -1225,8 +1499,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -1234,8 +1510,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -1243,8 +1521,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -1252,8 +1532,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -1261,8 +1543,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1270,8 +1554,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1279,8 +1565,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1288,8 +1576,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -1297,8 +1587,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -1306,8 +1598,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -1315,8 +1609,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -1324,8 +1620,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -1333,8 +1631,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -1342,8 +1642,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -1351,8 +1653,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -1360,8 +1664,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -1369,8 +1675,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -1378,8 +1686,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -1387,44 +1697,54 @@
},
{
"BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M2M_BYPASS_M2M_EGRESS.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M2M_BYPASS_M2M_EGRESS.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M2M_CLOCKTICKS",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2M_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -1432,113 +1752,142 @@
},
{
"BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to core transaction was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Clockticks of the mesh to PCI (M2P)",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory Lookups : Found in any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -1547,6 +1896,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Lookups : Found in A state",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -1555,6 +1905,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Lookups : Found in I state",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -1563,6 +1914,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Lookups : Found in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -1571,70 +1923,87 @@
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates : From/to any state. Note: event counts are incorrect in 2LM mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -1643,8 +2012,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
"UMask": "0x4",
@@ -1652,8 +2023,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
"UMask": "0x8",
@@ -1661,8 +2034,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
"UMask": "0x40",
@@ -1670,8 +2045,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
"UMask": "0x80",
@@ -1679,8 +2056,10 @@
},
{
"BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x2",
@@ -1688,8 +2067,10 @@
},
{
"BriefDescription": "Distress signal asserted : PMM Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.PMM_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : PMM Local : Counts the number of cycles either the local or incoming distress signals are asserted. : If the CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
"UMask": "0x10",
@@ -1697,8 +2078,10 @@
},
{
"BriefDescription": "Distress signal asserted : PMM Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : PMM Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : If another CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
"UMask": "0x20",
@@ -1706,8 +2089,10 @@
},
{
"BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x1",
@@ -1715,22 +2100,28 @@
},
{
"BriefDescription": "UNC_M2M_DISTRESS_PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "UNC_M2M_DISTRESS_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_DISTRESS_PMM_MEMMODE",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "UNC_M2M_DISTRESS_PMM_MEMMODE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -1738,8 +2129,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -1747,8 +2140,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1756,8 +2151,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1765,8 +2162,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1774,8 +2173,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1783,8 +2184,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1792,8 +2195,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1801,8 +2206,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1810,8 +2217,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1819,8 +2228,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1828,8 +2239,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1837,8 +2250,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1846,8 +2261,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1855,8 +2272,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1864,8 +2283,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1873,8 +2294,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1882,8 +2305,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1891,8 +2316,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -1900,8 +2327,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -1909,64 +2338,80 @@
},
{
"BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x704",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x104",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x140",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x102",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x101",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x110",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : DDR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : PMM - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2M Reads Issued to iMC : PMM - Ch0 : Counts all PMM dimm read requests(full line) sent from M2M to iMC",
"UMask": "0x120",
@@ -1974,56 +2419,70 @@
},
{
"BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x204",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x240",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x202",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x201",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x210",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : DDR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x208",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : PMM - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2M Reads Issued to iMC : PMM - Ch1 : Counts all PMM dimm read requests(full line) sent from M2M to iMC",
"UMask": "0x220",
@@ -2031,54 +2490,67 @@
},
{
"BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch2",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH2_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x440",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x740",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Critical Priority - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x702",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Normal Priority - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x701",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x710",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : DDR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x708",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : PMM - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.TO_PMM",
"PerPkg": "1",
@@ -2087,93 +2559,117 @@
},
{
"BriefDescription": "M2M Writes Issued to iMC : All Writes - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c10",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x410",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x401",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x404",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x402",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x408",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x440",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : DDR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x420",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : PMM - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2M Writes Issued to iMC : PMM - Ch0 : Counts all PMM dimm writes requests(full line and partial) sent from M2M to iMC",
"UMask": "0x480",
@@ -2181,85 +2677,107 @@
},
{
"BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x810",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x801",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x804",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x802",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x808",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x840",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : DDR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x820",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : PMM - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2M Writes Issued to iMC : PMM - Ch1 : Counts all PMM dimm writes requests(full line and partial) sent from M2M to iMC",
"UMask": "0x880",
@@ -2267,75 +2785,94 @@
},
{
"BriefDescription": "M2M Writes Issued to iMC : From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c01",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c04",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c02",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c08",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c40",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : DDR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c20",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : PMM - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
"PerPkg": "1",
@@ -2344,281 +2881,353 @@
},
{
"BriefDescription": "Write Tracker Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_M2M_MIRR_WRQ_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_M2M_MIRR_WRQ_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Number Packet Header Matches : MC Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M2M_PKT_MATCH.MC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Number Packet Header Matches : Mesh Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA0_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA1_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_MISS_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_RSP_PDRESET",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA0_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA1_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_MISS_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_RSP_PDRESET",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_HITA0_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_HITA1_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_MISS_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_RSP_PDRESET",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : UPI - Ch 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH2_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : XPT - Ch 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH2_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2a",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 0",
"UMask": "0x1",
@@ -2626,8 +3235,10 @@
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 1",
"UMask": "0x4",
@@ -2635,8 +3246,10 @@
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH2_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 2",
"UMask": "0x10",
@@ -2644,8 +3257,10 @@
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPTUPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - All Channels",
"UMask": "0x15",
@@ -2653,8 +3268,10 @@
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI- Ch 0",
"UMask": "0x1",
@@ -2662,8 +3279,10 @@
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI- Ch 1",
"UMask": "0x4",
@@ -2671,460 +3290,578 @@
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH2_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPTUPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.ERRORBLK_RxC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.NOT_PF_SAD_REGION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_SECURE_DROP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.RPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.STOP_B2B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.UPI_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.WPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.XPT_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.ERRORBLK_RxC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.NOT_PF_SAD_REGION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_SECURE_DROP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.RPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.STOP_B2B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.UPI_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.WPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.XPT_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.ERRORBLK_RxC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.NOT_PF_SAD_REGION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_CAM_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_SECURE_DROP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.RPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.STOP_B2B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.UPI_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.WPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.XPT_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH2_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH2_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_M2M_PREFCAM_INSERTS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2a",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": ": All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Counter": "0,1,2,3",
"EventCode": "0x7A",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Counter": "0,1,2,3",
"EventCode": "0x7A",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "Counter": "0,1,2,3",
"EventCode": "0x7A",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Counter": "0,1,2,3",
"EventCode": "0x7A",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -3132,8 +3869,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -3141,8 +3880,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -3150,8 +3891,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -3159,8 +3902,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -3168,8 +3913,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -3177,8 +3924,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x10",
@@ -3186,8 +3935,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -3195,8 +3946,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -3204,237 +3957,299 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "UNC_M2M_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_M2M_RxC_AD_PREF_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_M2M_RxC_AK_WR_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3442,8 +4257,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -3451,8 +4268,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -3460,8 +4279,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3469,8 +4290,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -3478,8 +4301,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -3487,8 +4312,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3496,8 +4323,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -3505,8 +4334,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -3514,8 +4345,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -3523,8 +4356,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x80",
@@ -3532,8 +4367,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3541,8 +4378,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -3550,8 +4389,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -3559,8 +4400,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -3568,8 +4411,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3577,8 +4422,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -3586,8 +4433,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -3595,8 +4444,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -3604,8 +4455,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3613,8 +4466,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -3622,8 +4477,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -3631,8 +4488,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -3640,8 +4499,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -3649,16 +4510,20 @@
},
{
"BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M2M_RxR_CRD_STARVED_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"Unit": "M2M"
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3666,8 +4531,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3675,8 +4542,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3684,8 +4553,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3693,8 +4564,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -3702,8 +4575,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3711,8 +4586,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -3720,8 +4597,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3729,8 +4608,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3738,8 +4619,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3747,8 +4630,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3756,8 +4641,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3765,8 +4652,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3774,8 +4663,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -3783,8 +4674,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3792,8 +4685,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x20",
@@ -3801,8 +4696,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3810,8 +4707,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3819,64 +4718,82 @@
},
{
"BriefDescription": "UNC_M2M_SCOREBOARD_AD_RETRY_ACCEPTS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2M_SCOREBOARD_AD_RETRY_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_SCOREBOARD_AD_RETRY_REJECTS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2M_SCOREBOARD_AD_RETRY_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Retry - Mem Mirroring Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_SCOREBOARD_BL_RETRY_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Retry - Mem Mirroring Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_M2M_SCOREBOARD_BL_RETRY_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Scoreboard Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_M2M_SCOREBOARD_RD_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Scoreboard Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M2M_SCOREBOARD_RD_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Scoreboard Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_M2M_SCOREBOARD_WR_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Scoreboard Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2M_SCOREBOARD_WR_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3884,8 +4801,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3893,8 +4812,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3902,8 +4823,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3911,8 +4834,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3920,8 +4845,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3929,8 +4856,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -3938,8 +4867,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -3947,8 +4878,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3956,8 +4889,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3965,8 +4900,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3974,8 +4911,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3983,8 +4922,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3992,8 +4933,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -4001,8 +4944,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -4010,8 +4955,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -4019,8 +4966,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4028,8 +4977,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4037,8 +4988,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4046,8 +4999,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -4055,8 +5010,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -4064,8 +5021,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -4073,8 +5032,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -4082,8 +5043,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -4091,8 +5054,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4100,8 +5065,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4109,8 +5076,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4118,8 +5087,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -4127,8 +5098,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -4136,8 +5109,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -4145,8 +5120,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -4154,8 +5131,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -4163,8 +5142,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4172,8 +5153,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4181,8 +5164,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4190,8 +5175,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4199,8 +5186,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4208,8 +5197,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4217,8 +5208,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4226,8 +5219,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4235,8 +5230,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4244,8 +5241,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4253,8 +5252,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4262,8 +5263,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4271,6 +5274,7 @@
},
{
"BriefDescription": "Tag Hit : Clean NearMem Read Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
"PerPkg": "1",
@@ -4280,6 +5284,7 @@
},
{
"BriefDescription": "Tag Hit : Dirty NearMem Read Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
"PerPkg": "1",
@@ -4289,8 +5294,10 @@
},
{
"BriefDescription": "Tag Hit : Clean NearMem Underfill Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tag Hit : Clean NearMem Underfill Hit : Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts clean underfill hits due to a partial write",
"UMask": "0x4",
@@ -4298,8 +5305,10 @@
},
{
"BriefDescription": "Tag Hit : Dirty NearMem Underfill Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tag Hit : Dirty NearMem Underfill Hit : Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts dirty underfill read hits due to a partial write",
"UMask": "0x8",
@@ -4307,620 +5316,778 @@
},
{
"BriefDescription": "Tag Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M2M_TAG_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_FULL.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_NE.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "Counter": "0,1,2,3",
"EventCode": "0x0d",
"EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x0e",
"EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x0c",
"EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x0b",
"EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x09",
"EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x0f",
"EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x0A",
"EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound Ring Transactions on AK : CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound Ring Transactions on AK : NDR Transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_M2M_TxC_AK.NDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AKC Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M2M_TxC_AKC_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x88",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to QPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4928,8 +6095,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4937,8 +6106,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4946,8 +6117,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4955,8 +6128,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4964,8 +6139,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4973,8 +6150,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4982,8 +6161,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4991,8 +6172,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -5000,8 +6183,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -5009,8 +6194,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x80",
@@ -5018,8 +6205,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5027,8 +6216,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -5036,8 +6227,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -5045,8 +6238,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -5054,8 +6249,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5063,8 +6260,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -5072,8 +6271,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -5081,8 +6282,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -5090,8 +6293,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -5099,8 +6304,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5108,8 +6315,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -5117,8 +6326,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -5126,8 +6337,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -5135,8 +6348,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5144,8 +6359,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -5153,8 +6370,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -5162,8 +6381,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -5171,8 +6392,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -5180,8 +6403,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5189,8 +6414,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -5198,8 +6425,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -5207,8 +6436,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -5216,8 +6447,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5225,8 +6458,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -5234,8 +6469,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -5243,8 +6480,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -5252,8 +6491,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -5261,8 +6502,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5270,8 +6513,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -5279,8 +6524,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -5288,8 +6535,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -5297,8 +6546,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5306,8 +6557,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x10",
@@ -5315,8 +6568,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -5324,8 +6579,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -5333,8 +6590,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x80",
@@ -5342,8 +6601,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5351,8 +6612,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -5360,8 +6623,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -5369,8 +6634,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -5378,8 +6645,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5387,8 +6656,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -5396,8 +6667,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -5405,8 +6678,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -5414,8 +6689,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -5423,8 +6700,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5432,8 +6711,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -5441,8 +6722,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -5450,8 +6733,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -5459,8 +6744,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x1",
@@ -5468,8 +6755,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -5477,8 +6766,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -5486,8 +6777,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x80",
@@ -5495,8 +6788,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x4",
@@ -5504,8 +6799,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -5513,8 +6810,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -5522,8 +6821,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -5531,8 +6832,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -5540,8 +6843,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -5549,8 +6854,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -5558,8 +6865,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -5567,8 +6876,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -5576,8 +6887,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -5585,8 +6898,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -5594,8 +6909,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -5603,8 +6920,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -5612,8 +6931,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -5621,8 +6942,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -5630,8 +6953,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -5639,8 +6964,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5648,8 +6975,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5657,8 +6986,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5666,8 +6997,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5675,8 +7008,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5684,8 +7019,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5693,8 +7030,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5702,8 +7041,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5711,8 +7052,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5720,8 +7063,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5729,8 +7074,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5738,8 +7085,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5747,8 +7096,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5756,8 +7107,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5765,8 +7118,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5774,8 +7129,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5783,8 +7140,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5792,8 +7151,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5801,8 +7162,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5810,8 +7173,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5819,8 +7184,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5828,8 +7195,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5837,8 +7206,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5846,8 +7217,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5855,8 +7228,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5864,8 +7239,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5873,8 +7250,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5882,8 +7261,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -5891,8 +7272,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -5900,8 +7283,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -5909,8 +7294,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -5918,8 +7305,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -5927,8 +7316,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -5936,8 +7327,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -5945,8 +7338,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -5954,8 +7349,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -5963,8 +7360,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5972,8 +7371,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5981,8 +7382,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5990,8 +7393,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5999,8 +7404,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -6008,8 +7415,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -6017,8 +7426,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -6026,8 +7437,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6035,8 +7448,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6044,8 +7459,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -6053,8 +7470,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -6062,8 +7481,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -6071,8 +7492,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -6080,8 +7503,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -6089,8 +7514,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -6098,8 +7525,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -6107,8 +7536,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -6116,8 +7547,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -6125,8 +7558,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_VERT_STARVED1.TGC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -6134,8 +7569,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6143,8 +7580,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6152,8 +7591,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6161,8 +7602,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6170,8 +7613,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6179,8 +7624,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6188,8 +7635,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6197,8 +7646,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6206,8 +7657,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6215,8 +7668,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6224,8 +7679,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6233,8 +7690,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6242,8 +7701,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6251,8 +7712,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6260,8 +7723,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6269,8 +7734,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6278,8 +7745,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -6287,8 +7756,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -6296,8 +7767,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6305,8 +7778,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6314,8 +7789,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6323,8 +7800,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6332,352 +7811,440 @@
},
{
"BriefDescription": "WPQ Flush : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "WPQ Flush : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "WPQ Flush : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_WPQ_FLUSH.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WR_TRACKER_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WR_TRACKER_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WR_TRACKER_FULL.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full : Mirror",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WR_TRACKER_FULL.MIRR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy : Mirror",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_NONTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_PWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -6685,8 +8252,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -6694,8 +8263,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -6703,8 +8274,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -6712,8 +8285,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -6721,8 +8296,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -6730,8 +8307,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -6739,8 +8318,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -6748,8 +8329,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -6757,8 +8340,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -6766,8 +8351,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -6775,8 +8362,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -6784,8 +8373,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -6793,8 +8384,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -6802,8 +8395,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -6811,8 +8406,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -6820,8 +8417,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -6829,8 +8428,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -6838,8 +8439,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -6847,8 +8450,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -6856,8 +8461,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -6865,8 +8472,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -6874,8 +8483,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -6883,8 +8494,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -6892,8 +8505,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -6901,8 +8516,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -6910,8 +8527,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -6919,8 +8538,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -6928,8 +8549,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -6937,8 +8560,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -6946,8 +8571,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -6955,8 +8582,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -6964,8 +8593,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -6973,8 +8604,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -6982,8 +8615,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -6991,8 +8626,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -7000,8 +8637,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -7009,8 +8648,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -7018,8 +8659,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -7027,8 +8670,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -7036,8 +8681,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -7045,8 +8692,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -7054,8 +8703,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -7063,8 +8714,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -7072,8 +8725,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -7081,8 +8736,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -7090,8 +8747,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -7099,8 +8758,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -7108,8 +8769,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -7117,8 +8780,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -7126,8 +8791,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -7135,8 +8802,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -7144,8 +8813,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -7153,8 +8824,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -7162,8 +8835,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -7171,8 +8846,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -7180,8 +8857,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -7189,8 +8868,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -7198,8 +8879,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -7207,8 +8890,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -7216,8 +8901,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -7225,8 +8912,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -7234,8 +8923,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -7243,8 +8934,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -7252,8 +8945,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -7261,8 +8956,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -7270,8 +8967,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -7279,8 +8978,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -7288,8 +8989,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -7297,8 +9000,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -7306,8 +9011,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -7315,8 +9022,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -7324,8 +9033,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -7333,8 +9044,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -7342,8 +9055,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -7351,8 +9066,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -7360,8 +9077,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -7369,8 +9088,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -7378,8 +9099,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -7387,8 +9110,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -7396,8 +9121,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -7405,8 +9132,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -7414,8 +9143,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -7423,8 +9154,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -7432,8 +9165,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -7441,8 +9176,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -7450,8 +9187,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -7459,8 +9198,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -7468,8 +9209,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Requests : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x4",
@@ -7477,8 +9220,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Snoops : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x8",
@@ -7486,8 +9231,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : VNA Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : VNA Messages : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x1",
@@ -7495,8 +9242,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Writebacks : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x2",
@@ -7504,6 +9253,7 @@
},
{
"BriefDescription": "Clockticks of the mesh to UPI (M3UPI)",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M3UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -7512,31 +9262,39 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2C Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M3UPI_D2C_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "D2C Sent : Count cases BL sends direct to core",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2U Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M3UPI_D2U_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "D2U Sent : Cases where SMI3 sends D2U command",
"Unit": "M3UPI"
},
{
"BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
"UMask": "0x4",
@@ -7544,8 +9302,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
"UMask": "0x8",
@@ -7553,8 +9313,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
"UMask": "0x40",
@@ -7562,8 +9324,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
"UMask": "0x80",
@@ -7571,8 +9335,10 @@
},
{
"BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M3UPI_DISTRESS_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x2",
@@ -7580,8 +9346,10 @@
},
{
"BriefDescription": "Distress signal asserted : PMM Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M3UPI_DISTRESS_ASSERTED.PMM_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : PMM Local : Counts the number of cycles either the local or incoming distress signals are asserted. : If the CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
"UMask": "0x10",
@@ -7589,8 +9357,10 @@
},
{
"BriefDescription": "Distress signal asserted : PMM Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M3UPI_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : PMM Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : If another CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
"UMask": "0x20",
@@ -7598,8 +9368,10 @@
},
{
"BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M3UPI_DISTRESS_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x1",
@@ -7607,8 +9379,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -7616,8 +9390,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -7625,8 +9401,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -7634,8 +9412,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -7643,8 +9423,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -7652,8 +9434,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -7661,8 +9445,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -7670,8 +9456,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -7679,8 +9467,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -7688,8 +9478,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -7697,8 +9489,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -7706,8 +9500,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -7715,8 +9511,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -7724,8 +9522,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -7733,8 +9533,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -7742,8 +9544,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -7751,8 +9555,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -7760,8 +9566,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -7769,8 +9577,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -7778,8 +9588,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -7787,8 +9599,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only) : No vn0 and vna credits available to send to M2",
"UMask": "0x1",
@@ -7796,8 +9610,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO2",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO2 : No vn0 and vna credits available to send to M2",
"UMask": "0x2",
@@ -7805,8 +9621,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO3",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO3 : No vn0 and vna credits available to send to M2",
"UMask": "0x4",
@@ -7814,8 +9632,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO4",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO4 : No vn0 and vna credits available to send to M2",
"UMask": "0x8",
@@ -7823,8 +9643,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO5",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
"UMask": "0x10",
@@ -7832,8 +9654,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together : No vn0 and vna credits available to send to M2",
"UMask": "0x40",
@@ -7841,8 +9665,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits : No vn0 and vna credits available to send to M2",
"UMask": "0x80",
@@ -7850,8 +9676,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO5",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.UBOX_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
"UMask": "0x20",
@@ -7859,24 +9687,30 @@
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_M3UPI_MISC_EXTERNAL.MBE_INST0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_M3UPI_MISC_EXTERNAL.MBE_INST1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x1",
@@ -7884,8 +9718,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 1 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x2",
@@ -7893,8 +9729,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x4",
@@ -7902,8 +9740,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AK - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AK - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x10",
@@ -7911,8 +9751,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AK - Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AK - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x20",
@@ -7920,8 +9762,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : BL - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : BL - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x8",
@@ -7929,8 +9773,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -7938,8 +9784,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -7947,8 +9795,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -7956,8 +9806,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -7965,8 +9817,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -7974,8 +9828,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -7983,8 +9839,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x10",
@@ -7992,8 +9850,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -8001,8 +9861,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -8010,95 +9872,119 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Lost Arb for VN0 : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : REQ on AD : VN0 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -8106,8 +9992,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : RSP on AD : VN0 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -8115,8 +10003,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : SNP on AD : VN0 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -8124,8 +10014,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : NCB on BL : VN0 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -8133,8 +10025,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : NCS on BL : VN0 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -8142,8 +10036,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : RSP on BL : VN0 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -8151,8 +10047,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : WB on BL : VN0 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -8160,8 +10058,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : REQ on AD : VN1 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -8169,8 +10069,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : RSP on AD : VN1 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -8178,8 +10080,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : SNP on AD : VN1 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -8187,8 +10091,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : NCB on BL : VN1 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -8196,8 +10102,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : NCS on BL : VN1 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -8205,8 +10113,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : RSP on BL : VN1 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -8214,8 +10124,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : WB on BL : VN1 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -8223,8 +10135,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0 : AD and BL messages won arbitration concurrently / in parallel",
"UMask": "0x10",
@@ -8232,8 +10146,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1 : AD and BL messages won arbitration concurrently / in parallel",
"UMask": "0x20",
@@ -8241,8 +10157,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : Max Parallel Win",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ALL_PARALLEL_WIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : Max Parallel Win : VN0 and VN1 arbitration sub-pipelines both produced AD and BL winners (maximum possible parallel winners)",
"UMask": "0x80",
@@ -8250,8 +10168,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN0 : Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
"UMask": "0x1",
@@ -8259,8 +10179,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN1 : Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
"UMask": "0x2",
@@ -8268,8 +10190,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN0 : Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
"UMask": "0x4",
@@ -8277,8 +10201,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN1 : Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
"UMask": "0x8",
@@ -8286,8 +10212,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.VN01_PARALLEL_WIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win : VN0 and VN1 arbitration sub-pipelines had parallel winners (at least one AD or BL on each side)",
"UMask": "0x40",
@@ -8295,8 +10223,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : REQ on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -8304,8 +10234,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : RSP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -8313,8 +10245,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : SNP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -8322,8 +10256,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : NCB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -8331,8 +10267,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : NCS on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -8340,8 +10278,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : RSP on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -8349,8 +10289,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : WB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -8358,8 +10300,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : REQ on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -8367,8 +10311,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : RSP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -8376,8 +10322,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : SNP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -8385,8 +10333,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : NCB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -8394,8 +10344,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : NCS on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -8403,8 +10355,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : RSP on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -8412,8 +10366,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : WB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -8421,8 +10377,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : REQ on AD : VN0 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -8430,8 +10388,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : RSP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -8439,8 +10399,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : SNP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -8448,8 +10410,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : NCB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -8457,8 +10421,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : NCS on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -8466,8 +10432,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : RSP on BL : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -8475,8 +10443,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : WB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -8484,8 +10454,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : REQ on AD : VN1 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -8493,8 +10465,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : RSP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -8502,8 +10476,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : SNP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -8511,8 +10487,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : NCB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -8520,8 +10498,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : NCS on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -8529,8 +10509,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : RSP on BL : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -8538,8 +10520,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : WB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -8547,8 +10531,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
"UMask": "0x2",
@@ -8556,8 +10542,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while pipeline is idle",
"UMask": "0x1",
@@ -8565,8 +10553,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 1",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 1 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 1 while merging with bl message in same flit",
"UMask": "0x4",
@@ -8574,8 +10564,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 2 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 2 while merging with bl message in same flit",
"UMask": "0x8",
@@ -8583,8 +10575,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : Any In BGF FIFO",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : Any In BGF FIFO : Indication that at least one packet (flit) is in the bgf (fifo only)",
"UMask": "0x1",
@@ -8592,8 +10586,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : Any in BGF Path",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : Any in BGF Path : Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
"UMask": "0x2",
@@ -8601,8 +10597,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.LT1_FOR_D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 1",
"UMask": "0x10",
@@ -8610,8 +10608,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.LT2_FOR_D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 2",
"UMask": "0x20",
@@ -8619,8 +10619,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : No D2K For Arb",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.VN0_NO_D2K_FOR_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : No D2K For Arb : VN0 BL RSP message was blocked from arbitration request due to lack of D2K CMP credit",
"UMask": "0x4",
@@ -8628,8 +10630,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.VN1_NO_D2K_FOR_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
"UMask": "0x8",
@@ -8637,8 +10641,10 @@
},
{
"BriefDescription": "Credit Occupancy : Credits Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.CONSUMED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Credits Consumed : number of remote vna credits consumed per cycle",
"UMask": "0x80",
@@ -8646,8 +10652,10 @@
},
{
"BriefDescription": "Credit Occupancy : D2K Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : D2K Credits : D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x10",
@@ -8655,8 +10663,10 @@
},
{
"BriefDescription": "Credit Occupancy : Packets in BGF FIFO",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Packets in BGF FIFO : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
"UMask": "0x2",
@@ -8664,8 +10674,10 @@
},
{
"BriefDescription": "Credit Occupancy : Packets in BGF Path",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Packets in BGF Path : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
"UMask": "0x4",
@@ -8673,8 +10685,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in completion fifo only",
"UMask": "0x40",
@@ -8682,8 +10696,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in marker table and in fifo",
"UMask": "0x20",
@@ -8691,8 +10707,10 @@
},
{
"BriefDescription": "Credit Occupancy : Transmit Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Transmit Credits : Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x8",
@@ -8700,8 +10718,10 @@
},
{
"BriefDescription": "Credit Occupancy : VNA In Use",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : VNA In Use : Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
"UMask": "0x1",
@@ -8709,8 +10729,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -8718,8 +10740,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -8727,8 +10751,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -8736,8 +10762,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -8745,8 +10773,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -8754,8 +10784,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -8763,8 +10795,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -8772,8 +10806,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -8781,8 +10817,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -8790,8 +10828,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -8799,8 +10839,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -8808,8 +10850,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -8817,8 +10861,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -8826,8 +10872,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -8835,8 +10883,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : All : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but could not be sent for any reason, e.g. low credits, low tsv, stall injection",
"UMask": "0x1",
@@ -8844,8 +10894,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : No BGF Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_BGF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : No BGF Credits : Data flit is ready for transmission but could not be sent",
"UMask": "0x8",
@@ -8853,8 +10905,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : No TxQ Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : No TxQ Credits : Data flit is ready for transmission but could not be sent",
"UMask": "0x10",
@@ -8862,8 +10916,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : TSV High",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.TSV_HI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : TSV High : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while tsv high",
"UMask": "0x2",
@@ -8871,8 +10927,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : Cycle valid for Flit",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.VALID_FOR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : Cycle valid for Flit : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while cycle is valid for flit transmission",
"UMask": "0x4",
@@ -8880,8 +10938,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 0 : generating bl data flit sequence; waiting for data pump 0",
"UMask": "0x1",
@@ -8889,8 +10949,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
"UMask": "0x10",
@@ -8898,8 +10960,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is tracking at least one message",
"UMask": "0x8",
@@ -8907,8 +10971,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending completion fifo is full",
"UMask": "0x40",
@@ -8916,8 +10982,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
"UMask": "0x20",
@@ -8925,8 +10993,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : a bl message finished but is in limbo and moved to pump-1-pending logic",
"UMask": "0x4",
@@ -8934,8 +11004,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 1 : generating bl data flit sequence; waiting for data pump 1",
"UMask": "0x2",
@@ -8943,8 +11015,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request naturally serviced during hold-off period",
"UMask": "0x4",
@@ -8952,8 +11026,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request forcibly serviced during service window",
"UMask": "0x8",
@@ -8961,8 +11037,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request received from link layer while idle (with no slot 2 request active immediately prior)",
"UMask": "0x1",
@@ -8970,8 +11048,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request withdrawn during hold-off period or service window",
"UMask": "0x2",
@@ -8979,16 +11059,20 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Needs Data Flit",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Needs Data Flit : BL message requires data flit sequence",
"UMask": "0x2",
@@ -8996,8 +11080,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0 : Waiting for header pump 0",
"UMask": "0x4",
@@ -9005,8 +11091,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 : Header pump 1 is not required for flit",
"UMask": "0x10",
@@ -9014,8 +11102,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble : Header pump 1 is not required for flit but flit transmission delayed",
"UMask": "0x20",
@@ -9023,8 +11113,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail : Header pump 1 is not required for flit and not available",
"UMask": "0x40",
@@ -9032,8 +11124,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1 : Waiting for header pump 1",
"UMask": "0x8",
@@ -9041,8 +11135,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate : Events related to Header Flit Generation - Set 1 : Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
"UMask": "0x1",
@@ -9050,8 +11146,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate Ready",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate Ready : Events related to Header Flit Generation - Set 1 : header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
"UMask": "0x2",
@@ -9059,8 +11157,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate Wasted",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate Wasted : Events related to Header Flit Generation - Set 1 : Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
"UMask": "0x4",
@@ -9068,8 +11168,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked : Events related to Header Flit Generation - Set 1 : Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
"UMask": "0x8",
@@ -9077,8 +11179,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_AFTER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: message was slotted only after run-ahead was over; run-ahead mode definitely wasted",
"UMask": "0x80",
@@ -9086,8 +11190,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Message",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_DURING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Message : Events related to Header Flit Generation - Set 1 : run-ahead mode: one message slotted during run-ahead",
"UMask": "0x10",
@@ -9095,8 +11201,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_AFTER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: second message slotted immediately after run-ahead; potential run-ahead success",
"UMask": "0x20",
@@ -9104,8 +11212,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: two (or three) message flit sent immediately after run-ahead; complete run-ahead success",
"UMask": "0x40",
@@ -9113,8 +11223,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Ok",
+ "Counter": "0,1,2,3",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Ok : Events related to Header Flit Generation - Set 2 : new header flit construction may proceed in parallel with data flit sequence",
"UMask": "0x4",
@@ -9122,8 +11234,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Flit Finished",
+ "Counter": "0,1,2,3",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Flit Finished : Events related to Header Flit Generation - Set 2 : header flit finished assembly in parallel with data flit sequence",
"UMask": "0x10",
@@ -9131,8 +11245,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Message",
+ "Counter": "0,1,2,3",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Message : Events related to Header Flit Generation - Set 2 : message is slotted into header flit in parallel with data flit sequence",
"UMask": "0x8",
@@ -9140,8 +11256,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall",
+ "Counter": "0,1,2,3",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall : Events related to Header Flit Generation - Set 2 : Rate-matching stall injected",
"UMask": "0x1",
@@ -9149,8 +11267,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message",
+ "Counter": "0,1,2,3",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message : Events related to Header Flit Generation - Set 2 : Rate matching stall injected, but no additional message slotted during stall cycle",
"UMask": "0x2",
@@ -9158,8 +11278,10 @@
},
{
"BriefDescription": "Sent Header Flit : One Message",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : One Message : One message in flit; VNA or non-VNA flit",
"UMask": "0x1",
@@ -9167,8 +11289,10 @@
},
{
"BriefDescription": "Sent Header Flit : One Message in non-VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG_VNX",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : One Message in non-VNA : One message in flit; non-VNA flit",
"UMask": "0x8",
@@ -9176,8 +11300,10 @@
},
{
"BriefDescription": "Sent Header Flit : Two Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.2_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : Two Messages : Two messages in flit; VNA flit",
"UMask": "0x2",
@@ -9185,8 +11311,10 @@
},
{
"BriefDescription": "Sent Header Flit : Three Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.3_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : Three Messages : Three messages in flit; VNA flit",
"UMask": "0x4",
@@ -9194,32 +11322,40 @@
},
{
"BriefDescription": "Sent Header Flit : One Slot Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit : Two Slots Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit : All Slots Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "Header Not Sent : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : All : header flit is ready for transmission but could not be sent : header flit is ready for transmission but could not be sent for any reason, e.g. no credits, low tsv, stall injection",
"UMask": "0x1",
@@ -9227,8 +11363,10 @@
},
{
"BriefDescription": "Header Not Sent : No BGF Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No BGF Credits : header flit is ready for transmission but could not be sent : No BGF credits available",
"UMask": "0x8",
@@ -9236,8 +11374,10 @@
},
{
"BriefDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No BGF credits available; no additional message slotted into flit",
"UMask": "0x20",
@@ -9245,8 +11385,10 @@
},
{
"BriefDescription": "Header Not Sent : No TxQ Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No TxQ Credits : header flit is ready for transmission but could not be sent : No TxQ credits available",
"UMask": "0x10",
@@ -9254,8 +11396,10 @@
},
{
"BriefDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No TxQ credits available; no additional message slotted into flit",
"UMask": "0x40",
@@ -9263,8 +11407,10 @@
},
{
"BriefDescription": "Header Not Sent : TSV High",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.TSV_HI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : TSV High : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while tsv high",
"UMask": "0x2",
@@ -9272,8 +11418,10 @@
},
{
"BriefDescription": "Header Not Sent : Cycle valid for Flit",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.VALID_FOR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : Cycle valid for Flit : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while cycle is valid for flit transmission",
"UMask": "0x4",
@@ -9281,8 +11429,10 @@
},
{
"BriefDescription": "Message Held : Can't Slot AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Can't Slot AD : some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x10",
@@ -9290,8 +11440,10 @@
},
{
"BriefDescription": "Message Held : Can't Slot BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Can't Slot BL : some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x20",
@@ -9299,8 +11451,10 @@
},
{
"BriefDescription": "Message Held : Parallel Attempt",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Parallel Attempt : ad and bl messages attempted to slot into the same flit in parallel",
"UMask": "0x4",
@@ -9308,8 +11462,10 @@
},
{
"BriefDescription": "Message Held : Parallel Success",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
@@ -9317,8 +11473,10 @@
},
{
"BriefDescription": "Message Held : VN0",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : VN0 : vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
"UMask": "0x1",
@@ -9326,8 +11484,10 @@
},
{
"BriefDescription": "Message Held : VN1",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : VN1 : vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
"UMask": "0x2",
@@ -9335,8 +11495,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : REQ on AD : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9344,8 +11506,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on AD : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9353,8 +11517,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : SNP on AD : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9362,8 +11528,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCB on BL : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9371,8 +11539,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCS on BL : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -9380,8 +11550,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on BL : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9389,8 +11561,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : WB on BL : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9398,8 +11572,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : REQ on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9407,8 +11583,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9416,8 +11594,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : SNP on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9425,8 +11605,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCB on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9434,8 +11616,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCS on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -9443,8 +11627,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9452,8 +11638,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : WB on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9461,8 +11649,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : REQ on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9470,8 +11660,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9479,8 +11671,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : SNP on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9488,8 +11682,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCB on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9497,8 +11693,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCS on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -9506,8 +11704,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9515,8 +11715,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : WB on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9524,8 +11726,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : REQ on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9533,8 +11737,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9542,8 +11748,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : SNP on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9551,8 +11759,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCB on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9560,8 +11770,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCS on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCS on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -9569,8 +11781,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9578,8 +11792,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : WB on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9587,8 +11803,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9596,8 +11814,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9605,8 +11825,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9614,8 +11836,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9623,8 +11847,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -9632,8 +11858,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9641,8 +11869,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9650,8 +11880,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9659,8 +11891,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9668,8 +11902,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9677,8 +11913,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9686,8 +11924,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -9695,8 +11935,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9704,8 +11946,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9713,8 +11957,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Any In Use",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Any In Use : At least one remote vna credit is in use",
"UMask": "0x20",
@@ -9722,8 +11968,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Corrected",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Corrected : Number of remote vna credits corrected (local return) per cycle",
"UMask": "0x1",
@@ -9731,8 +11979,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 1 : Remote vna credit level is less than 1 (i.e. no vna credits available)",
"UMask": "0x2",
@@ -9740,8 +11990,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 10 : remote vna credit level is less than 10; parallel vn0/vn1 arb not possible",
"UMask": "0x10",
@@ -9749,8 +12001,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 4 : Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
"UMask": "0x4",
@@ -9758,8 +12012,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 5 : Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
"UMask": "0x8",
@@ -9767,8 +12023,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credit count was less than 5 and allocation to ad or bl messages was required",
"UMask": "0x2",
@@ -9776,8 +12034,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credit count was less than 10 and allocation to vn0 or vn1 was required",
"UMask": "0x1",
@@ -9785,8 +12045,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn0, remote vna credits were allocated only to ad messages, not to bl",
"UMask": "0x10",
@@ -9794,8 +12056,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn0, remote vna credits were allocated only to bl messages, not to ad",
"UMask": "0x20",
@@ -9803,8 +12067,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credits were allocated only to vn0, not to vn1",
"UMask": "0x4",
@@ -9812,8 +12078,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn1, remote vna credits were allocated only to ad messages, not to bl",
"UMask": "0x40",
@@ -9821,8 +12089,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn1, remote vna credits were allocated only to bl messages, not to ad",
"UMask": "0x80",
@@ -9830,8 +12100,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credits were allocated only to vn1, not to vn0",
"UMask": "0x8",
@@ -9839,8 +12111,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x11",
@@ -9848,8 +12122,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -9857,8 +12133,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -9866,8 +12144,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x44",
@@ -9875,8 +12155,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -9884,8 +12166,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -9893,8 +12177,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x11",
@@ -9902,8 +12188,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -9911,8 +12199,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -9920,8 +12210,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -9929,8 +12221,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x80",
@@ -9938,8 +12232,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x44",
@@ -9947,8 +12243,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -9956,8 +12254,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -9965,8 +12265,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M3UPI_RxR_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -9974,8 +12276,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -9983,8 +12287,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -9992,8 +12298,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -10001,8 +12309,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -10010,8 +12320,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -10019,8 +12331,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -10028,8 +12342,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -10037,8 +12353,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -10046,8 +12364,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -10055,16 +12375,20 @@
},
{
"BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"Unit": "M3UPI"
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -10072,8 +12396,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -10081,8 +12407,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -10090,8 +12418,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -10099,8 +12429,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -10108,8 +12440,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -10117,8 +12451,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -10126,8 +12462,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -10135,8 +12473,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M3UPI_RxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -10144,8 +12484,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -10153,8 +12495,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -10162,8 +12506,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -10171,8 +12517,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -10180,8 +12528,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -10189,8 +12539,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -10198,8 +12550,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x20",
@@ -10207,8 +12561,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -10216,8 +12572,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -10225,8 +12583,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -10234,8 +12594,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -10243,8 +12605,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -10252,8 +12616,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -10261,8 +12627,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -10270,8 +12638,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -10279,8 +12649,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -10288,8 +12660,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -10297,8 +12671,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -10306,8 +12682,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -10315,8 +12693,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -10324,8 +12704,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -10333,8 +12715,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -10342,8 +12726,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -10351,8 +12737,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -10360,8 +12748,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -10369,8 +12759,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -10378,8 +12770,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -10387,8 +12781,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -10396,8 +12792,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -10405,8 +12803,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -10414,8 +12814,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -10423,8 +12825,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -10432,8 +12836,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -10441,8 +12847,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -10450,8 +12858,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -10459,8 +12869,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -10468,8 +12880,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -10477,8 +12891,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -10486,8 +12902,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -10495,8 +12913,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -10504,8 +12924,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -10513,8 +12935,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -10522,8 +12946,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -10531,8 +12957,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -10540,8 +12968,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -10549,8 +12979,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -10558,8 +12990,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -10567,8 +13001,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -10576,8 +13012,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -10585,8 +13023,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -10594,8 +13034,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -10603,8 +13045,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -10612,8 +13056,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -10621,8 +13067,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 REQ Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -10630,8 +13078,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 RSP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -10639,8 +13089,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 SNP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -10648,8 +13100,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 WB Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -10657,8 +13111,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 REQ Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -10666,8 +13122,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 RSP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -10675,8 +13133,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 SNP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -10684,8 +13144,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 WB Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -10693,8 +13155,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x1",
@@ -10702,8 +13166,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x2",
@@ -10711,8 +13177,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x4",
@@ -10720,8 +13188,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x8",
@@ -10729,8 +13199,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x1",
@@ -10738,8 +13210,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x4",
@@ -10747,8 +13221,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x2",
@@ -10756,8 +13232,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 WB Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x8",
@@ -10765,8 +13243,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x10",
@@ -10774,8 +13254,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x40",
@@ -10783,8 +13265,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x20",
@@ -10792,8 +13276,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 WB Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x80",
@@ -10801,8 +13287,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -10810,8 +13298,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -10819,8 +13309,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -10828,8 +13320,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -10837,8 +13331,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -10846,8 +13342,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -10855,8 +13353,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -10864,78 +13364,98 @@
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "AK Flow Q Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AK Flow Q Occupancy",
+ "Counter": "0",
"EventCode": "0x1E",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Failed ARB for BL : VN0 NCB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 NCB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -10943,8 +13463,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 NCS Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 NCS Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -10952,8 +13474,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 RSP Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -10961,8 +13485,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 WB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -10970,8 +13496,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 NCS Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 NCS Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -10979,8 +13507,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 NCB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 NCB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -10988,8 +13518,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 RSP Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -10997,8 +13529,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 WB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -11006,8 +13540,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x1",
@@ -11015,8 +13551,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x4",
@@ -11024,8 +13562,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x2",
@@ -11033,8 +13573,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 WB Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x8",
@@ -11042,8 +13584,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x10",
@@ -11051,8 +13595,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x40",
@@ -11060,8 +13606,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x20",
@@ -11069,8 +13617,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 WB Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x80",
@@ -11078,8 +13628,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -11087,8 +13639,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -11096,8 +13650,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 NCS Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -11105,8 +13661,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 NCB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -11114,8 +13672,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -11123,8 +13683,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -11132,8 +13694,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1_NCB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1_NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x80",
@@ -11141,8 +13705,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1_NCS Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1_NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -11150,120 +13716,150 @@
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCS Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1F",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1F",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_THROUGH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x1F",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_WRPULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1F",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x1F",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_THROUGH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "Counter": "0",
"EventCode": "0x1F",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_WRPULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -11271,8 +13867,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -11280,8 +13878,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -11289,8 +13889,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -11298,8 +13900,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -11307,8 +13911,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -11316,8 +13922,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -11325,8 +13933,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -11334,8 +13944,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -11343,8 +13955,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -11352,8 +13966,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x80",
@@ -11361,8 +13977,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -11370,8 +13988,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -11379,8 +13999,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -11388,8 +14010,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -11397,8 +14021,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -11406,8 +14032,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -11415,8 +14043,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -11424,8 +14054,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -11433,8 +14065,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -11442,8 +14076,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -11451,8 +14087,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -11460,8 +14098,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -11469,8 +14109,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -11478,8 +14120,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -11487,8 +14131,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -11496,8 +14142,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -11505,8 +14153,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -11514,8 +14164,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -11523,8 +14175,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -11532,8 +14186,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -11541,8 +14197,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -11550,8 +14208,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -11559,8 +14219,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -11568,8 +14230,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -11577,8 +14241,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -11586,8 +14252,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -11595,8 +14263,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -11604,8 +14274,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -11613,8 +14285,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -11622,8 +14296,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -11631,8 +14307,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -11640,8 +14318,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x11",
@@ -11649,8 +14329,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x10",
@@ -11658,8 +14340,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -11667,8 +14351,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -11676,8 +14362,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x80",
@@ -11685,8 +14373,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x44",
@@ -11694,8 +14384,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -11703,8 +14395,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -11712,8 +14406,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -11721,8 +14417,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -11730,8 +14428,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -11739,8 +14439,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -11748,8 +14450,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -11757,8 +14461,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -11766,8 +14472,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -11775,8 +14483,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -11784,8 +14494,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -11793,8 +14505,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -11802,8 +14516,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x1",
@@ -11811,8 +14527,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -11820,8 +14538,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -11829,8 +14549,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x80",
@@ -11838,8 +14560,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x4",
@@ -11847,8 +14571,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -11856,8 +14582,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -11865,8 +14593,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -11874,8 +14604,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -11883,8 +14615,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -11892,8 +14626,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -11901,8 +14637,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -11910,8 +14648,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -11919,8 +14659,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -11928,8 +14670,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -11937,8 +14681,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -11946,8 +14692,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -11955,8 +14703,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -11964,8 +14714,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS_1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -11973,8 +14725,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS_1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -11982,8 +14736,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -11991,8 +14747,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -12000,8 +14758,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -12009,8 +14769,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -12018,8 +14780,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -12027,8 +14791,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -12036,8 +14802,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -12045,8 +14813,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -12054,8 +14824,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -12063,8 +14835,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -12072,8 +14846,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -12081,8 +14857,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -12090,8 +14868,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -12099,8 +14879,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -12108,8 +14890,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -12117,8 +14901,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -12126,8 +14912,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -12135,8 +14923,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -12144,8 +14934,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -12153,8 +14945,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -12162,8 +14956,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -12171,8 +14967,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -12180,8 +14978,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -12189,8 +14989,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -12198,8 +15000,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -12207,8 +15011,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -12216,8 +15022,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -12225,8 +15033,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -12234,8 +15044,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -12243,8 +15055,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -12252,8 +15066,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -12261,8 +15077,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -12270,8 +15088,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -12279,8 +15099,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -12288,8 +15110,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_VERT_NACK1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -12297,8 +15121,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_VERT_NACK1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -12306,8 +15132,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -12315,8 +15143,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -12324,8 +15154,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -12333,8 +15165,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -12342,8 +15176,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -12351,8 +15187,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -12360,8 +15198,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -12369,8 +15209,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -12378,8 +15220,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -12387,8 +15231,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -12396,8 +15242,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -12405,8 +15253,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -12414,8 +15264,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -12423,8 +15275,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -12432,8 +15286,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -12441,8 +15297,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -12450,8 +15308,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -12459,8 +15319,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -12468,8 +15330,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED1.TGC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -12477,8 +15341,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 REQ Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x2",
@@ -12486,8 +15352,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 RSP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x8",
@@ -12495,8 +15363,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 SNP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x4",
@@ -12504,8 +15374,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 REQ Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x10",
@@ -12513,8 +15385,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 RSP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x40",
@@ -12522,8 +15396,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 SNP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x20",
@@ -12531,8 +15407,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VNA : No credits available to send to UPIs on the AD Ring",
"UMask": "0x1",
@@ -12540,8 +15418,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x4",
@@ -12549,8 +15429,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x2",
@@ -12558,8 +15440,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x8",
@@ -12567,8 +15451,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x20",
@@ -12576,8 +15462,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x10",
@@ -12585,8 +15473,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x40",
@@ -12594,8 +15484,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VNA : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x1",
@@ -12603,16 +15495,20 @@
},
{
"BriefDescription": "FlowQ Generated Prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "FlowQ Generated Prefetch : Count cases where FlowQ causes spawn of Prefetch to iMC/SMI3 target",
"Unit": "M3UPI"
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -12620,8 +15516,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -12629,8 +15527,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -12638,8 +15538,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -12647,8 +15549,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -12656,8 +15560,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -12665,8 +15571,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -12674,8 +15582,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -12683,8 +15593,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -12692,8 +15604,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -12701,8 +15615,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -12710,8 +15626,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -12719,8 +15637,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -12728,8 +15648,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -12737,8 +15659,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -12746,8 +15670,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -12755,8 +15681,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -12764,8 +15692,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -12773,8 +15703,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -12782,8 +15714,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -12791,8 +15725,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -12800,8 +15736,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -12809,8 +15747,10 @@
},
{
"BriefDescription": "VN0 Credit Used : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : WB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -12818,8 +15758,10 @@
},
{
"BriefDescription": "VN0 Credit Used : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : NCB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -12827,8 +15769,10 @@
},
{
"BriefDescription": "VN0 Credit Used : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : REQ on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -12836,8 +15780,10 @@
},
{
"BriefDescription": "VN0 Credit Used : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : RSP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -12845,8 +15791,10 @@
},
{
"BriefDescription": "VN0 Credit Used : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : SNP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -12854,8 +15802,10 @@
},
{
"BriefDescription": "VN0 Credit Used : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : RSP on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -12863,8 +15813,10 @@
},
{
"BriefDescription": "VN0 No Credits : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : WB on BL : Number of Cycles there were no VN0 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -12872,8 +15824,10 @@
},
{
"BriefDescription": "VN0 No Credits : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : NCB on BL : Number of Cycles there were no VN0 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -12881,8 +15835,10 @@
},
{
"BriefDescription": "VN0 No Credits : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : REQ on AD : Number of Cycles there were no VN0 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -12890,8 +15846,10 @@
},
{
"BriefDescription": "VN0 No Credits : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : RSP on AD : Number of Cycles there were no VN0 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -12899,8 +15857,10 @@
},
{
"BriefDescription": "VN0 No Credits : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : SNP on AD : Number of Cycles there were no VN0 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -12908,8 +15868,10 @@
},
{
"BriefDescription": "VN0 No Credits : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : RSP on BL : Number of Cycles there were no VN0 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -12917,8 +15879,10 @@
},
{
"BriefDescription": "VN1 Credit Used : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : WB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -12926,8 +15890,10 @@
},
{
"BriefDescription": "VN1 Credit Used : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : NCB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -12935,8 +15901,10 @@
},
{
"BriefDescription": "VN1 Credit Used : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : REQ on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -12944,8 +15912,10 @@
},
{
"BriefDescription": "VN1 Credit Used : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : RSP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -12953,8 +15923,10 @@
},
{
"BriefDescription": "VN1 Credit Used : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : SNP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -12962,8 +15934,10 @@
},
{
"BriefDescription": "VN1 Credit Used : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : RSP on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -12971,8 +15945,10 @@
},
{
"BriefDescription": "VN1 No Credits : WB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : WB on BL : Number of Cycles there were no VN1 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -12980,8 +15956,10 @@
},
{
"BriefDescription": "VN1 No Credits : NCB on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : NCB on BL : Number of Cycles there were no VN1 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -12989,8 +15967,10 @@
},
{
"BriefDescription": "VN1 No Credits : REQ on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : REQ on AD : Number of Cycles there were no VN1 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -12998,8 +15978,10 @@
},
{
"BriefDescription": "VN1 No Credits : RSP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : RSP on AD : Number of Cycles there were no VN1 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -13007,8 +15989,10 @@
},
{
"BriefDescription": "VN1 No Credits : SNP on AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : SNP on AD : Number of Cycles there were no VN1 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -13016,8 +16000,10 @@
},
{
"BriefDescription": "VN1 No Credits : RSP on BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : RSP on BL : Number of Cycles there were no VN1 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -13025,168 +16011,210 @@
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x82",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x81",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x84",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc0",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7E",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7D",
"EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7D",
"EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7D",
"EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7D",
"EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7D",
"EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7D",
"EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7D",
"EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7D",
"EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message is making arbitration request",
"UMask": "0x4",
@@ -13194,8 +16222,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message arrived in ingress pipeline",
"UMask": "0x1",
@@ -13203,8 +16233,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message took bypass path",
"UMask": "0x2",
@@ -13212,8 +16244,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was slotted into flit (non bypass)",
"UMask": "0x10",
@@ -13221,8 +16255,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message lost arbitration",
"UMask": "0x8",
@@ -13230,8 +16266,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was dropped because it became too old",
"UMask": "0x20",
@@ -13239,8 +16277,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was dropped because it was overwritten by new message while prefetch queue was full",
"UMask": "0x20",
@@ -13248,6 +16288,7 @@
},
{
"BriefDescription": "Number of kfclks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -13256,8 +16297,10 @@
},
{
"BriefDescription": "Direct packet attempts : D2C",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Direct packet attempts : D2C : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
"UMask": "0x1",
@@ -13265,8 +16308,10 @@
},
{
"BriefDescription": "Direct packet attempts : D2K",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Direct packet attempts : D2K : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
"UMask": "0x2",
@@ -13274,70 +16319,87 @@
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_UPI_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -13346,182 +16408,228 @@
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req Nack",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_UPI_POWER_L1_NACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "L1 Req Nack : Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req (same as L1 Ack).",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_UPI_POWER_L1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "L1 Req (same as L1 Ack). : Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xe",
@@ -13529,8 +16637,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10e",
@@ -13538,8 +16648,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xf",
@@ -13547,8 +16659,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10f",
@@ -13556,8 +16670,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Request : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x8",
@@ -13565,8 +16681,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x108",
@@ -13574,8 +16692,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Response - Conflict : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x1aa",
@@ -13583,8 +16703,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Response - Invalid : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x12a",
@@ -13592,8 +16714,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Response - Data : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xc",
@@ -13601,8 +16725,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10c",
@@ -13610,8 +16736,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Response - No Data : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xa",
@@ -13619,8 +16747,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10a",
@@ -13628,8 +16758,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Snoop : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x9",
@@ -13637,8 +16769,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x109",
@@ -13646,8 +16780,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Writeback : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xd",
@@ -13655,8 +16791,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10d",
@@ -13664,8 +16802,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 0 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x1",
@@ -13673,8 +16813,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 1 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x2",
@@ -13682,8 +16824,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 2 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x4",
@@ -13691,46 +16835,57 @@
},
{
"BriefDescription": "CRC Errors Detected",
+ "Counter": "0,1,2,3",
"EventCode": "0x0B",
"EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CRC Errors Detected : Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
"Unit": "UPI"
},
{
"BriefDescription": "LLR Requests Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "LLR Requests Sent : Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs.",
"Unit": "UPI"
},
{
"BriefDescription": "VN0 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Consumed : Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VN1 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Consumed : Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VNA Credit Consumed : Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : All Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -13740,6 +16895,7 @@
},
{
"BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -13749,8 +16905,10 @@
},
{
"BriefDescription": "Valid Flits Received : Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
"UMask": "0x8",
@@ -13758,8 +16916,10 @@
},
{
"BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Null FLITs received from any slot : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -13767,8 +16927,10 @@
},
{
"BriefDescription": "Valid Flits Received : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -13776,8 +16938,10 @@
},
{
"BriefDescription": "Valid Flits Received : LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -13785,6 +16949,7 @@
},
{
"BriefDescription": "Valid Flits Received : All Non Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -13794,8 +16959,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
"UMask": "0x20",
@@ -13803,8 +16970,10 @@
},
{
"BriefDescription": "Valid Flits Received : Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -13812,8 +16981,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -13821,8 +16992,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -13830,8 +17003,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -13839,8 +17014,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x1",
@@ -13848,8 +17025,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x2",
@@ -13857,8 +17036,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x4",
@@ -13866,8 +17047,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 0 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x1",
@@ -13875,8 +17058,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 1 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x2",
@@ -13884,8 +17069,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 2 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x4",
@@ -13893,118 +17080,147 @@
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -14013,30 +17229,38 @@
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xe",
@@ -14044,8 +17268,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10e",
@@ -14053,8 +17279,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xf",
@@ -14062,8 +17290,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10f",
@@ -14071,8 +17301,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Request : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x8",
@@ -14080,8 +17312,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x108",
@@ -14089,8 +17323,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Response - Conflict : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x1aa",
@@ -14098,8 +17334,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Response - Invalid : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x12a",
@@ -14107,8 +17345,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Response - Data : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xc",
@@ -14116,8 +17356,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10c",
@@ -14125,8 +17367,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Response - No Data : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xa",
@@ -14134,8 +17378,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10a",
@@ -14143,8 +17389,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Snoop : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x9",
@@ -14152,8 +17400,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x109",
@@ -14161,8 +17411,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Writeback : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xd",
@@ -14170,8 +17422,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10d",
@@ -14179,14 +17433,17 @@
},
{
"BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_UPI_TxL_BYPASSED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Bypassed : Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the UPI Link. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : All Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -14196,6 +17453,7 @@
},
{
"BriefDescription": "Valid Flits Sent : Null FLITs transmitted to any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -14205,8 +17463,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
"UMask": "0x8",
@@ -14214,8 +17474,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Idle",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -14223,8 +17485,10 @@
},
{
"BriefDescription": "Valid Flits Sent : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -14232,8 +17496,10 @@
},
{
"BriefDescription": "Valid Flits Sent : LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -14241,6 +17507,7 @@
},
{
"BriefDescription": "Valid Flits Sent : All Non Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -14250,8 +17517,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
"UMask": "0x20",
@@ -14259,8 +17528,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -14268,8 +17539,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -14277,8 +17550,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -14286,8 +17561,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -14295,37 +17572,46 @@
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_UPI_TxL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VNA Credits Pending Return - Occupancy : Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
"Unit": "UPI"
},
{
"BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_U_CLOCKTICKS",
"PerPkg": "1",
@@ -14333,16 +17619,20 @@
},
{
"BriefDescription": "Message Received : Doorbell",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "Message Received : Interrupt",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : Interrupt : Interrupts",
"UMask": "0x10",
@@ -14350,8 +17640,10 @@
},
{
"BriefDescription": "Message Received : IPI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
"UMask": "0x4",
@@ -14359,8 +17651,10 @@
},
{
"BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
"UMask": "0x2",
@@ -14368,8 +17662,10 @@
},
{
"BriefDescription": "Message Received : VLW",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x1",
@@ -14377,160 +17673,200 @@
},
{
"BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
"EventCode": "0x44",
"EventName": "UNC_U_LOCK_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IDI Lock/SplitLock Cycles : Number of times an IDI Lock/SplitLock sequence was started",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Counter": "0,1",
"EventCode": "0x4F",
"EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Counter": "0,1",
"EventCode": "0x4F",
"EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles PHOLD Assert to Ack : Assert to ACK : PHOLD cycles.",
"UMask": "0x1",
@@ -14538,32 +17874,40 @@
},
{
"BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RACU Request : Number outstanding register requests within message channel tracker",
"Unit": "UBOX"
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json
index 1b8a719b81a5..3c3c2cf51e1d 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json
@@ -1,70 +1,87 @@
[
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "1",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "2",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "3",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x22",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x23",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "5",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "6",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x25",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "7",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x26",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "8",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x27",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_IIO_CLOCKTICKS",
"PerPkg": "1",
@@ -73,6 +90,7 @@
},
{
"BriefDescription": "Free running counter that increments for IIO clocktick",
+ "Counter": "0",
"EventCode": "0xff",
"EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
"PerPkg": "1",
@@ -82,8 +100,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts : All Ports",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL",
+ "Experimental": "1",
"FCMask": "0x04",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -92,6 +112,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
"FCMask": "0x04",
@@ -103,6 +124,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
"FCMask": "0x04",
@@ -114,6 +136,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
"FCMask": "0x04",
@@ -125,6 +148,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
"FCMask": "0x04",
@@ -136,6 +160,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
"FCMask": "0x04",
@@ -147,6 +172,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
"FCMask": "0x04",
@@ -158,6 +184,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
"FCMask": "0x04",
@@ -169,6 +196,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
"FCMask": "0x04",
@@ -180,6 +208,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
"FCMask": "0x04",
@@ -191,8 +220,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL",
+ "Experimental": "1",
"FCMask": "0x04",
"PerPkg": "1",
"PublicDescription": "PCIe Completion Buffer Occupancy : Part 0-7",
@@ -201,6 +232,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
"FCMask": "0x04",
@@ -211,6 +243,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
"FCMask": "0x04",
@@ -221,6 +254,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
"FCMask": "0x04",
@@ -231,6 +265,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
"FCMask": "0x04",
@@ -241,6 +276,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
"FCMask": "0x04",
@@ -251,6 +287,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
"FCMask": "0x04",
@@ -261,6 +298,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
"FCMask": "0x04",
@@ -271,6 +309,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
"FCMask": "0x04",
@@ -281,6 +320,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
"FCMask": "0x04",
@@ -291,8 +331,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -302,8 +344,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -313,8 +357,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -324,8 +370,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -335,8 +383,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -346,8 +396,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -357,8 +409,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -368,8 +422,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -379,8 +435,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -390,8 +448,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -401,8 +461,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -412,8 +474,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -423,8 +487,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -434,8 +500,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -445,8 +513,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -456,8 +526,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -467,8 +539,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -478,8 +552,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -489,8 +565,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -500,8 +578,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -511,8 +591,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -522,8 +604,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -533,8 +617,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -544,8 +630,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -555,8 +643,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -566,8 +656,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -577,8 +669,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -588,8 +682,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -599,8 +695,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -610,8 +708,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -621,8 +721,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -632,8 +734,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -643,8 +747,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -654,8 +760,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -665,8 +773,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -676,8 +786,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -687,8 +799,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -698,8 +812,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -709,8 +825,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -720,8 +838,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -731,8 +851,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -742,8 +864,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -753,6 +877,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -764,6 +889,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -775,6 +901,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -786,6 +913,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -797,6 +925,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -808,6 +937,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -819,6 +949,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -830,6 +961,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -841,8 +973,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -852,8 +986,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -863,6 +999,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -874,6 +1011,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -885,6 +1023,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -896,6 +1035,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -907,6 +1047,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -918,6 +1059,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -929,6 +1071,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -940,6 +1083,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -951,8 +1095,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -962,8 +1108,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -973,8 +1121,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -984,8 +1134,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -995,8 +1147,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1006,8 +1160,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1017,8 +1173,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1028,8 +1186,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1039,8 +1199,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1050,8 +1212,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1061,8 +1225,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1072,8 +1238,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1083,8 +1251,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1094,8 +1264,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1105,8 +1277,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1116,8 +1290,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1127,8 +1303,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1138,8 +1316,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1149,8 +1329,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1160,8 +1342,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1171,8 +1355,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1182,8 +1368,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1193,8 +1381,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1204,8 +1394,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1215,8 +1407,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1226,8 +1420,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1237,8 +1433,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1248,8 +1446,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1259,8 +1459,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1270,8 +1472,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1281,8 +1485,10 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1292,8 +1498,10 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1303,6 +1511,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
"FCMask": "0x07",
@@ -1314,6 +1523,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
"FCMask": "0x07",
@@ -1325,6 +1535,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
"FCMask": "0x07",
@@ -1336,6 +1547,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
"FCMask": "0x07",
@@ -1347,6 +1559,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
"FCMask": "0x07",
@@ -1358,6 +1571,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
"FCMask": "0x07",
@@ -1369,6 +1583,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
"FCMask": "0x07",
@@ -1380,6 +1595,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
"FCMask": "0x07",
@@ -1391,8 +1607,10 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1402,8 +1620,10 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1413,6 +1633,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -1424,6 +1645,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -1435,6 +1657,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -1446,6 +1669,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -1457,6 +1681,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -1468,6 +1693,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -1479,6 +1705,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -1490,6 +1717,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -1501,8 +1729,10 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1512,8 +1742,10 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1523,6 +1755,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -1534,6 +1767,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -1545,6 +1779,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -1556,6 +1791,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -1567,6 +1803,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -1578,6 +1815,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -1589,6 +1827,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -1600,6 +1839,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -1611,8 +1851,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1622,8 +1864,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1633,8 +1877,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1644,8 +1890,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1655,8 +1903,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1666,8 +1916,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1677,8 +1929,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1688,8 +1942,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1699,8 +1955,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1710,8 +1968,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1721,8 +1981,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1732,8 +1994,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1743,8 +2007,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1754,8 +2020,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1765,8 +2033,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1776,8 +2046,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1787,8 +2059,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1798,8 +2072,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1809,8 +2085,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1820,8 +2098,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1831,8 +2111,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1842,8 +2124,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1853,8 +2137,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1864,8 +2150,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1875,8 +2163,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1886,8 +2176,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1897,8 +2189,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1908,8 +2202,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1919,8 +2215,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1930,8 +2228,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1941,8 +2241,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -1952,8 +2254,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -1963,8 +2267,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -1974,8 +2280,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -1985,8 +2293,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -1996,8 +2306,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2007,8 +2319,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2018,8 +2332,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2029,8 +2345,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2040,8 +2358,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2051,8 +2371,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2062,8 +2384,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2073,8 +2397,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 1G Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
"UMask": "0x10",
@@ -2082,8 +2408,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 2M Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
"UMask": "0x8",
@@ -2091,8 +2419,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 4K Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
"UMask": "0x4",
@@ -2100,8 +2430,10 @@
},
{
"BriefDescription": ": IOTLB lookups all",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB lookups all : Some transactions have to look up IOTLB multiple times. Counts every time a request looks up IOTLB.",
"UMask": "0x2",
@@ -2109,8 +2441,10 @@
},
{
"BriefDescription": ": Context cache hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Context cache hits : Counts each time a first look up of the transaction hits the RCC.",
"UMask": "0x80",
@@ -2118,8 +2452,10 @@
},
{
"BriefDescription": ": Context cache lookups",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Context cache lookups : Counts each time a transaction looks up root context cache.",
"UMask": "0x40",
@@ -2127,8 +2463,10 @@
},
{
"BriefDescription": ": IOTLB lookups first",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB lookups first : Some transactions have to look up IOTLB multiple times. Counts the first time a request looks up IOTLB.",
"UMask": "0x1",
@@ -2136,8 +2474,10 @@
},
{
"BriefDescription": ": IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.MISSES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB Fills (same as IOTLB miss) : When a transaction misses IOTLB, it does a page walk to look up memory and bring in the relevant page translation. Counts when this page translation is written to IOTLB.",
"UMask": "0x20",
@@ -2145,8 +2485,10 @@
},
{
"BriefDescription": ": Cycles PWT full",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.CYC_PWT_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Cycles PWT full : Counts cycles the IOMMU has reached its maximum limit for outstanding page walks.",
"UMask": "0x80",
@@ -2154,8 +2496,10 @@
},
{
"BriefDescription": ": IOMMU memory access",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOMMU memory access : IOMMU sends out memory fetches when it misses the cache look up which is indicated by this signal. M2IOSF only uses low priority channel",
"UMask": "0x40",
@@ -2163,8 +2507,10 @@
},
{
"BriefDescription": ": PWC Hit to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
"UMask": "0x8",
@@ -2172,8 +2518,10 @@
},
{
"BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
"UMask": "0x4",
@@ -2181,8 +2529,10 @@
},
{
"BriefDescription": ": PWC Hit to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_4K_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 4K page : Counts each time a transaction's first look up hits the SLPWC at the 4K level",
"UMask": "0x2",
@@ -2190,8 +2540,10 @@
},
{
"BriefDescription": ": PWT Hit to a 256T page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWT Hit to a 256T page : Counts each time a transaction's first look up hits the SLPWC at the 512G level",
"UMask": "0x10",
@@ -2199,8 +2551,10 @@
},
{
"BriefDescription": ": PageWalk cache fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PageWalk cache fill : When a transaction misses SLPWC, it does a page walk to look up memory and bring in the relevant page translation. When this page translation is written to SLPWC, ObsPwcFillValid_nnnH is asserted.",
"UMask": "0x20",
@@ -2208,8 +2562,10 @@
},
{
"BriefDescription": ": PageWalk cache lookup",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PageWalk cache lookup : Counts each time a transaction looks up second level page walk cache.",
"UMask": "0x1",
@@ -2217,8 +2573,10 @@
},
{
"BriefDescription": ": Interrupt Entry cache hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Interrupt Entry cache hit : Counts each time a transaction's first look up hits the IEC.",
"UMask": "0x80",
@@ -2226,8 +2584,10 @@
},
{
"BriefDescription": ": Interrupt Entry cache lookup",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Interrupt Entry cache lookup : Counts the number of transaction looks up that interrupt remapping cache.",
"UMask": "0x40",
@@ -2235,8 +2595,10 @@
},
{
"BriefDescription": ": Device-selective Context cache invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DEVICE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Device-selective Context cache invalidation cycles : Counts number of Device selective context cache invalidation events",
"UMask": "0x20",
@@ -2244,8 +2606,10 @@
},
{
"BriefDescription": ": Domain-selective Context cache invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DOMAIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Domain-selective Context cache invalidation cycles : Counts number of Domain selective context cache invalidation events",
"UMask": "0x10",
@@ -2253,8 +2617,10 @@
},
{
"BriefDescription": ": Context cache global invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_GBL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Context cache global invalidation cycles : Counts number of Context Cache global invalidation events",
"UMask": "0x8",
@@ -2262,8 +2628,10 @@
},
{
"BriefDescription": ": Domain-selective IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_DOMAIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Domain-selective IOTLB invalidation cycles : Counts number of Domain selective invalidation events",
"UMask": "0x2",
@@ -2271,8 +2639,10 @@
},
{
"BriefDescription": ": Global IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_GBL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Global IOTLB invalidation cycles : Indicates that IOMMU is doing global invalidation.",
"UMask": "0x1",
@@ -2280,8 +2650,10 @@
},
{
"BriefDescription": ": Page-selective IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PAGE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Page-selective IOTLB invalidation cycles : Counts number of Page-selective within Domain Invalidation events",
"UMask": "0x4",
@@ -2289,8 +2661,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus : Asserted if all bits specified by mask match",
"UMask": "0x1",
@@ -2298,8 +2672,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if all bits specified by mask match",
"UMask": "0x8",
@@ -2307,8 +2683,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if all bits specified by mask match",
"UMask": "0x4",
@@ -2316,8 +2694,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : PCIE bus : Asserted if all bits specified by mask match",
"UMask": "0x2",
@@ -2325,8 +2705,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if all bits specified by mask match",
"UMask": "0x10",
@@ -2334,8 +2716,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if all bits specified by mask match",
"UMask": "0x20",
@@ -2343,8 +2727,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus : Asserted if any bits specified by mask match",
"UMask": "0x1",
@@ -2352,8 +2738,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if any bits specified by mask match",
"UMask": "0x8",
@@ -2361,8 +2749,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if any bits specified by mask match",
"UMask": "0x4",
@@ -2370,8 +2760,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : PCIE bus : Asserted if any bits specified by mask match",
"UMask": "0x2",
@@ -2379,8 +2771,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if any bits specified by mask match",
"UMask": "0x10",
@@ -2388,8 +2782,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if any bits specified by mask match",
"UMask": "0x20",
@@ -2397,15 +2793,19 @@
},
{
"BriefDescription": "Counting disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_IIO_NOTHING",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "Occupancy of outbound request queue : To device",
+ "Counter": "2,3",
"EventCode": "0xC5",
"EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2415,8 +2815,10 @@
},
{
"BriefDescription": ": Passing data to be written",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2426,8 +2828,10 @@
},
{
"BriefDescription": ": Issuing final read or write of line",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2436,8 +2840,10 @@
},
{
"BriefDescription": ": Processing response from IOMMU",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2446,8 +2852,10 @@
},
{
"BriefDescription": ": Issuing to IOMMU",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2456,8 +2864,10 @@
},
{
"BriefDescription": ": Request Ownership",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2467,8 +2877,10 @@
},
{
"BriefDescription": ": Writing line",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2478,8 +2890,10 @@
},
{
"BriefDescription": "Number requests sent to PCIe from main die : From ITC",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_NUM_REQ_FROM_CPU.ITC",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2489,8 +2903,10 @@
},
{
"BriefDescription": "Number requests sent to PCIe from main die : Completion allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_NUM_REQ_FROM_CPU.PREALLOC",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2499,8 +2915,10 @@
},
{
"BriefDescription": "Number requests PCIe makes of the main die : Drop request",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU.ALL.DROP",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2510,6 +2928,7 @@
},
{
"BriefDescription": "Number requests PCIe makes of the main die : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
"FCMask": "0x07",
@@ -2521,8 +2940,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2531,8 +2952,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2541,8 +2964,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2551,8 +2976,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2561,8 +2988,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2571,8 +3000,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2581,8 +3012,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2591,8 +3024,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2601,15 +3036,19 @@
},
{
"BriefDescription": "ITC address map 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2619,8 +3058,10 @@
},
{
"BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2630,16 +3071,20 @@
},
{
"BriefDescription": "PWT occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PWT occupancy : Indicates how many page walks are outstanding at any point in time.",
"Unit": "IIO"
},
{
"BriefDescription": "PCIe Request - cacheline complete : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2649,8 +3094,10 @@
},
{
"BriefDescription": "PCIe Request - cacheline complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2660,8 +3107,10 @@
},
{
"BriefDescription": "PCIe Request - cacheline complete : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2671,8 +3120,10 @@
},
{
"BriefDescription": "PCIe Request - cacheline complete : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2682,8 +3133,10 @@
},
{
"BriefDescription": "PCIe Request complete : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2693,8 +3146,10 @@
},
{
"BriefDescription": "PCIe Request complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2704,8 +3159,10 @@
},
{
"BriefDescription": "PCIe Request complete : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2715,8 +3172,10 @@
},
{
"BriefDescription": "PCIe Request complete : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2726,8 +3185,10 @@
},
{
"BriefDescription": "PCIe Request complete : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2737,8 +3198,10 @@
},
{
"BriefDescription": "PCIe Request complete : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2748,8 +3211,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2759,8 +3224,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2770,8 +3237,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2781,8 +3250,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2792,16 +3263,20 @@
},
{
"BriefDescription": "Symbol Times on Link",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_IIO_SYMBOL_TIMES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Symbol Times on Link : Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS",
"Unit": "IIO"
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -2811,8 +3286,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -2822,8 +3299,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -2833,8 +3312,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -2844,8 +3325,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -2855,8 +3338,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -2866,8 +3351,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2877,8 +3364,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2888,8 +3377,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -2899,8 +3390,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -2910,8 +3403,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -2921,8 +3416,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -2932,8 +3429,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -2943,8 +3442,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -2954,8 +3455,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -2965,8 +3468,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -2976,8 +3481,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2987,8 +3494,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2998,8 +3507,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3009,8 +3520,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3020,8 +3533,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3031,8 +3546,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3042,8 +3559,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3053,8 +3572,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3064,8 +3585,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3075,8 +3598,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3086,8 +3611,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3097,8 +3624,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3108,8 +3637,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3119,8 +3650,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3130,8 +3663,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3141,8 +3676,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3152,8 +3689,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3163,8 +3702,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3174,8 +3715,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3185,8 +3728,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3196,8 +3741,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3207,8 +3754,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3218,8 +3767,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3229,8 +3780,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3240,8 +3793,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3251,8 +3806,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3262,6 +3819,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -3273,6 +3831,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -3284,6 +3843,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -3295,6 +3855,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -3306,6 +3867,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -3317,6 +3879,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -3328,6 +3891,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -3339,6 +3903,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -3350,8 +3915,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3361,8 +3928,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3372,6 +3941,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -3383,6 +3953,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -3394,6 +3965,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -3405,6 +3977,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -3416,6 +3989,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -3427,6 +4001,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -3438,6 +4013,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -3449,6 +4025,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -3460,8 +4037,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3471,8 +4050,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3482,8 +4063,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3493,8 +4076,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3504,8 +4089,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3515,8 +4102,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3526,8 +4115,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3537,8 +4128,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3548,8 +4141,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3559,8 +4154,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3570,8 +4167,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3581,8 +4180,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3592,8 +4193,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3603,8 +4206,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3614,8 +4219,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3625,8 +4232,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3636,8 +4245,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3647,8 +4258,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3658,8 +4271,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3669,8 +4284,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3680,8 +4297,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3691,8 +4310,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3702,8 +4323,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3713,8 +4336,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3724,8 +4349,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3735,8 +4362,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3746,8 +4375,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3757,8 +4388,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3768,8 +4401,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3779,8 +4414,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3790,8 +4427,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3801,6 +4440,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
"FCMask": "0x07",
@@ -3812,6 +4452,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
"FCMask": "0x07",
@@ -3823,6 +4464,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
"FCMask": "0x07",
@@ -3834,6 +4476,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
"FCMask": "0x07",
@@ -3845,6 +4488,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
"FCMask": "0x07",
@@ -3856,6 +4500,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
"FCMask": "0x07",
@@ -3867,6 +4512,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
"FCMask": "0x07",
@@ -3878,6 +4524,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
"FCMask": "0x07",
@@ -3889,8 +4536,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3900,8 +4549,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3911,6 +4562,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -3922,6 +4574,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -3933,6 +4586,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -3944,6 +4598,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -3955,6 +4610,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -3966,6 +4622,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -3977,6 +4634,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -3988,6 +4646,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -3999,8 +4658,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -4010,8 +4671,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -4021,6 +4684,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -4032,6 +4696,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -4043,6 +4708,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -4054,6 +4720,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -4065,6 +4732,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -4076,6 +4744,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -4087,6 +4756,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -4098,6 +4768,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -4109,8 +4780,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -4120,8 +4793,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -4131,8 +4806,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -4142,8 +4819,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -4153,8 +4832,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -4164,8 +4845,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -4175,8 +4858,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4186,8 +4871,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4197,8 +4884,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -4208,8 +4897,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -4219,8 +4910,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -4230,8 +4923,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -4241,8 +4936,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -4252,8 +4949,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -4263,8 +4962,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -4274,8 +4975,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -4285,8 +4988,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4296,8 +5001,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4307,8 +5014,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -4318,8 +5027,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -4329,8 +5040,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -4340,8 +5053,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -4351,8 +5066,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -4362,8 +5079,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -4373,8 +5092,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -4384,8 +5105,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -4395,8 +5118,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4406,8 +5131,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4417,8 +5144,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -4428,8 +5157,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -4439,8 +5170,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4448,8 +5181,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4457,8 +5192,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4466,8 +5203,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4475,8 +5214,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4484,8 +5225,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4493,8 +5236,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -4502,8 +5247,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -4511,8 +5258,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4520,8 +5269,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4529,8 +5280,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4538,8 +5291,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4547,8 +5302,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4556,8 +5313,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4565,8 +5324,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4574,8 +5335,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4583,8 +5346,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4592,8 +5357,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -4601,8 +5368,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -4610,8 +5379,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4619,8 +5390,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4628,8 +5401,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4637,8 +5412,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4646,8 +5423,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4655,8 +5434,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4664,8 +5445,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4673,8 +5456,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4682,8 +5467,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4691,8 +5478,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -4700,8 +5489,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -4709,8 +5500,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4718,8 +5511,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4727,8 +5522,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4736,8 +5533,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4745,8 +5544,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4754,8 +5555,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4763,8 +5566,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4772,8 +5577,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4781,8 +5588,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4790,8 +5599,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -4799,8 +5610,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -4808,8 +5621,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8b",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4817,8 +5632,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8b",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4826,8 +5643,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8b",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4835,8 +5654,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4844,8 +5665,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4853,8 +5676,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4862,8 +5687,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4871,8 +5698,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4880,8 +5709,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4889,8 +5720,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -4898,8 +5731,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -4907,8 +5742,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4916,8 +5753,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4925,8 +5764,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4934,8 +5775,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4943,8 +5786,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4952,8 +5797,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4961,8 +5808,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4970,8 +5819,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4979,8 +5830,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4988,8 +5841,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -4997,8 +5852,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -5006,8 +5863,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -5015,8 +5874,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -5024,8 +5885,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -5033,8 +5896,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -5042,8 +5907,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -5051,8 +5918,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -5060,8 +5929,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -5069,8 +5940,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -5078,8 +5951,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -5087,8 +5962,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -5096,8 +5973,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -5105,8 +5984,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8d",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -5114,8 +5995,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8d",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -5123,8 +6006,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8d",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -5132,8 +6017,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -5141,8 +6028,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -5150,8 +6039,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -5159,8 +6050,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -5168,8 +6061,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -5177,8 +6072,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -5186,8 +6083,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -5195,8 +6094,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -5204,8 +6105,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8f",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -5213,8 +6116,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8f",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -5222,8 +6127,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8f",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -5231,6 +6138,7 @@
},
{
"BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2P_CLOCKTICKS",
"PerPkg": "1",
@@ -5239,6 +6147,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2P_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -5246,8 +6155,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
"UMask": "0x4",
@@ -5255,8 +6166,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
"UMask": "0x8",
@@ -5264,8 +6177,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
"UMask": "0x40",
@@ -5273,8 +6188,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
"UMask": "0x80",
@@ -5282,8 +6199,10 @@
},
{
"BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x2",
@@ -5291,8 +6210,10 @@
},
{
"BriefDescription": "Distress signal asserted : PMM Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.PMM_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : PMM Local : Counts the number of cycles either the local or incoming distress signals are asserted. : If the CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
"UMask": "0x10",
@@ -5300,8 +6221,10 @@
},
{
"BriefDescription": "Distress signal asserted : PMM Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : PMM Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : If another CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
"UMask": "0x20",
@@ -5309,8 +6232,10 @@
},
{
"BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x1",
@@ -5318,8 +6243,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -5327,8 +6254,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -5336,8 +6265,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb6",
"EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5345,8 +6276,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb6",
"EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5354,8 +6287,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb6",
"EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5363,8 +6298,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb6",
"EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5372,8 +6309,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xbb",
"EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5381,8 +6320,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xbb",
"EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5390,8 +6331,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xbb",
"EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5399,8 +6342,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xbb",
"EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5408,8 +6353,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb7",
"EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5417,8 +6364,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb7",
"EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5426,8 +6375,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb7",
"EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5435,8 +6386,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb7",
"EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5444,8 +6397,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5453,8 +6408,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5462,8 +6419,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5471,8 +6430,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5480,8 +6441,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xb9",
"EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -5489,8 +6452,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xb9",
"EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -5498,8 +6463,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x1",
@@ -5507,8 +6474,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x2",
@@ -5516,8 +6485,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x4",
@@ -5525,8 +6496,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x8",
@@ -5534,8 +6507,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
"UMask": "0x10",
@@ -5543,8 +6518,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -5552,8 +6529,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the DRS message class.",
"UMask": "0x8",
@@ -5561,8 +6540,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCB message class.",
"UMask": "0x10",
@@ -5570,8 +6551,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -5579,8 +6562,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x1",
@@ -5588,8 +6573,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x2",
@@ -5597,8 +6584,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x4",
@@ -5606,8 +6595,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x8",
@@ -5615,8 +6606,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
"UMask": "0x10",
@@ -5624,8 +6617,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -5633,912 +6628,1140 @@
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : All",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xac",
"EventName": "UNC_M2P_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -6546,8 +7769,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xac",
"EventName": "UNC_M2P_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -6555,8 +7780,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xac",
"EventName": "UNC_M2P_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -6564,8 +7791,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xac",
"EventName": "UNC_M2P_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -6573,8 +7802,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -6582,8 +7813,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -6591,8 +7824,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x10",
@@ -6600,8 +7835,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -6609,8 +7846,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -6618,95 +7857,119 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "UNC_M2P_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x80",
@@ -6714,8 +7977,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x1",
@@ -6723,8 +7988,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x2",
@@ -6732,8 +7999,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x4",
@@ -6741,8 +8010,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x20",
@@ -6750,8 +8021,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x40",
@@ -6759,8 +8032,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x8",
@@ -6768,8 +8043,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x10",
@@ -6777,8 +8054,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x80",
@@ -6786,8 +8065,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x1",
@@ -6795,8 +8076,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x2",
@@ -6804,8 +8087,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x4",
@@ -6813,8 +8098,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x20",
@@ -6822,8 +8109,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x40",
@@ -6831,8 +8120,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x8",
@@ -6840,8 +8131,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x10",
@@ -6849,8 +8142,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x11",
@@ -6858,8 +8153,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -6867,8 +8164,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -6876,8 +8175,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x44",
@@ -6885,8 +8186,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -6894,8 +8197,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -6903,8 +8208,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x11",
@@ -6912,8 +8219,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -6921,8 +8230,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -6930,8 +8241,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -6939,8 +8252,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x80",
@@ -6948,8 +8263,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x44",
@@ -6957,8 +8274,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -6966,8 +8285,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -6975,8 +8296,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -6984,8 +8307,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -6993,8 +8318,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -7002,8 +8329,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -7011,8 +8340,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -7020,8 +8351,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7029,8 +8362,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -7038,8 +8373,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -7047,8 +8384,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -7056,8 +8395,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -7065,16 +8406,20 @@
},
{
"BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M2P_RxR_CRD_STARVED_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7082,8 +8427,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -7091,8 +8438,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -7100,8 +8449,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -7109,8 +8460,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -7118,8 +8471,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7127,8 +8482,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -7136,8 +8493,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -7145,8 +8504,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -7154,8 +8515,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7163,8 +8526,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -7172,8 +8537,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -7181,8 +8548,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -7190,8 +8559,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -7199,8 +8570,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7208,8 +8581,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x20",
@@ -7217,8 +8592,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -7226,8 +8603,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -7235,8 +8614,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7244,8 +8625,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7253,8 +8636,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7262,8 +8647,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7271,8 +8658,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7280,8 +8669,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7289,8 +8680,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -7298,8 +8691,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -7307,8 +8702,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7316,8 +8713,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7325,8 +8724,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7334,8 +8735,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7343,8 +8746,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7352,8 +8757,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7361,8 +8768,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -7370,8 +8779,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -7379,8 +8790,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7388,8 +8801,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7397,8 +8812,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7406,8 +8823,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7415,8 +8834,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7424,8 +8845,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7433,8 +8856,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -7442,8 +8867,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -7451,8 +8878,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7460,8 +8889,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7469,8 +8900,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7478,8 +8911,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7487,8 +8922,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7496,8 +8933,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7505,8 +8944,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -7514,8 +8955,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -7523,8 +8966,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7532,8 +8977,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7541,8 +8988,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7550,8 +8999,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7559,8 +9010,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7568,8 +9021,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7577,8 +9032,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7586,8 +9043,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7595,8 +9054,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7604,8 +9065,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7613,8 +9076,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7622,8 +9087,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7631,24 +9098,30 @@
},
{
"BriefDescription": "UNC_M2P_TxC_CREDITS.PMM",
+ "Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_M2P_TxC_CREDITS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x1",
@@ -7656,8 +9129,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x10",
@@ -7665,8 +9140,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x2",
@@ -7674,8 +9151,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x20",
@@ -7683,8 +9162,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x4",
@@ -7692,8 +9173,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x40",
@@ -7701,8 +9184,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x80",
@@ -7710,8 +9195,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x8",
@@ -7719,8 +9206,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.AD_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x1",
@@ -7728,8 +9217,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.AD_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x10",
@@ -7737,8 +9228,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.AK_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x2",
@@ -7746,8 +9239,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.AK_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x20",
@@ -7755,8 +9250,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.BL_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x4",
@@ -7764,8 +9261,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.BL_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x40",
@@ -7773,8 +9272,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x80",
@@ -7782,8 +9283,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x8",
@@ -7791,8 +9294,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.AD_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x1",
@@ -7800,8 +9305,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.AD_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x10",
@@ -7809,8 +9316,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x8",
@@ -7818,8 +9327,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x80",
@@ -7827,8 +9338,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.BL_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x4",
@@ -7836,8 +9349,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.BL_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x40",
@@ -7845,8 +9360,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7854,8 +9371,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -7863,8 +9382,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -7872,8 +9393,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7881,8 +9404,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -7890,8 +9415,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -7899,8 +9426,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7908,8 +9437,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -7917,8 +9448,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -7926,8 +9459,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -7935,8 +9470,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x80",
@@ -7944,8 +9481,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7953,8 +9492,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -7962,8 +9503,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -7971,8 +9514,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -7980,8 +9525,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7989,8 +9536,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -7998,8 +9547,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8007,8 +9558,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8016,8 +9569,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8025,8 +9580,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8034,8 +9591,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8043,8 +9602,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8052,8 +9613,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8061,8 +9624,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8070,8 +9635,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8079,8 +9646,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8088,8 +9657,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8097,8 +9668,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8106,8 +9679,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8115,8 +9690,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8124,8 +9701,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8133,8 +9712,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8142,8 +9723,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8151,8 +9734,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8160,8 +9745,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8169,8 +9756,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8178,8 +9767,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8187,8 +9778,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8196,8 +9789,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8205,8 +9800,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8214,8 +9811,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8223,8 +9822,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8232,8 +9833,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x10",
@@ -8241,8 +9844,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -8250,8 +9855,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -8259,8 +9866,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x80",
@@ -8268,8 +9877,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8277,8 +9888,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -8286,8 +9899,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -8295,8 +9910,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -8304,8 +9921,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -8313,8 +9932,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8322,8 +9943,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8331,8 +9954,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8340,8 +9965,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8349,8 +9976,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8358,8 +9987,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8367,8 +9998,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8376,8 +10009,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8385,8 +10020,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x1",
@@ -8394,8 +10031,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -8403,8 +10042,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -8412,8 +10053,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x80",
@@ -8421,8 +10064,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x4",
@@ -8430,8 +10075,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -8439,8 +10086,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -8448,8 +10097,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9c",
"EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8457,8 +10108,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9c",
"EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8466,8 +10119,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9c",
"EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8475,8 +10130,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9c",
"EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8484,8 +10141,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8493,8 +10152,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8502,8 +10163,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8511,8 +10174,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -8520,8 +10185,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8529,8 +10196,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8538,8 +10207,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -8547,8 +10218,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9e",
"EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8556,8 +10229,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9e",
"EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8565,8 +10240,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8574,8 +10251,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -8583,8 +10262,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8592,8 +10273,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -8601,8 +10284,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -8610,8 +10295,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -8619,8 +10306,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -8628,8 +10317,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8637,8 +10328,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8646,8 +10339,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8655,8 +10350,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -8664,8 +10361,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8673,8 +10372,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -8682,8 +10383,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -8691,8 +10394,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -8700,8 +10405,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -8709,8 +10416,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8718,8 +10427,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8727,8 +10438,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8736,8 +10449,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -8745,8 +10460,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8754,8 +10471,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -8763,8 +10482,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -8772,8 +10493,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -8781,8 +10504,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -8790,8 +10515,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8799,8 +10526,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8808,8 +10537,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -8817,8 +10548,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -8826,8 +10559,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -8835,8 +10570,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -8844,8 +10581,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -8853,8 +10592,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -8862,8 +10603,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -8871,8 +10614,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -8880,8 +10625,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -8889,8 +10636,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8898,8 +10647,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -8907,8 +10658,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8916,8 +10669,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -8925,8 +10680,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -8934,8 +10691,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -8943,8 +10702,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -8952,8 +10713,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8961,8 +10724,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8970,8 +10735,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -8979,8 +10746,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -8988,8 +10757,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -8997,8 +10768,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -9006,8 +10779,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -9015,8 +10790,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -9024,8 +10801,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -9033,8 +10812,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9b",
"EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -9042,8 +10823,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9b",
"EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -9051,8 +10834,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9b",
"EventName": "UNC_M2P_TxR_VERT_STARVED1.TGC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -9060,8 +10845,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9069,8 +10856,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9078,8 +10867,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9087,8 +10878,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9096,8 +10889,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb4",
"EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9105,8 +10900,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb4",
"EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9114,8 +10911,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb4",
"EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9123,8 +10922,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb4",
"EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9132,8 +10933,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9141,8 +10944,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9150,8 +10955,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9159,8 +10966,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9168,8 +10977,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9177,8 +10988,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9186,8 +10999,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9195,8 +11010,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9204,8 +11021,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xb3",
"EventName": "UNC_M2P_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -9213,8 +11032,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xb3",
"EventName": "UNC_M2P_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -9222,8 +11043,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb5",
"EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9231,8 +11054,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb5",
"EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9240,8 +11065,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb5",
"EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9249,8 +11076,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb5",
"EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
index 814d9599474d..87604c953c0f 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DRAM Activate Count : All Activates",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_ACT_COUNT.ALL",
"PerPkg": "1",
@@ -10,8 +11,10 @@
},
{
"BriefDescription": "DRAM Activate Count : Activate due to Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_ACT_COUNT.BYP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Activate Count : Activate due to Bypass : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x8",
@@ -19,6 +22,7 @@
},
{
"BriefDescription": "All DRAM CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -28,6 +32,7 @@
},
{
"BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -37,8 +42,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre : DRAM RD_CAS and WR_CAS Commands : Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with explicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
"UMask": "0x2",
@@ -46,8 +53,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x8",
@@ -55,8 +64,10 @@
},
{
"BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
"UMask": "0x1",
@@ -64,8 +75,10 @@
},
{
"BriefDescription": "DRAM underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total of DRAM Read CAS commands issued due to an underfill",
"UMask": "0x4",
@@ -73,6 +86,7 @@
},
{
"BriefDescription": "All DRAM write CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -82,8 +96,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x10",
@@ -91,8 +107,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x20",
@@ -100,28 +118,34 @@
},
{
"BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Free running counter that increments for the Memory Controller",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_M_CLOCKTICKS_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "imc_free_running"
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M_DRAM_PRE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge All Commands : Counts the number of times that the precharge all command was sent.",
"Unit": "iMC"
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
@@ -131,6 +155,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M_DRAM_REFRESH.OPPORTUNISTIC",
"PerPkg": "1",
@@ -140,6 +165,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
@@ -149,6 +175,7 @@
},
{
"BriefDescription": "Half clockticks for IMC",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_M_HCLOCKTICKS",
"PerPkg": "1",
@@ -156,37 +183,46 @@
},
{
"BriefDescription": "UNC_M_PARITY_ERRORS",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M_PARITY_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.RD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_PCLS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.TOTAL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_PCLS.TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.WR",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_PCLS.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands : All",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.ALL",
"PerPkg": "1",
@@ -196,22 +232,27 @@
},
{
"BriefDescription": "PMM Commands : Misc Commands (error, flow ACKs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.MISC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands : Misc GNTs",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.MISC_GNT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands : Reads - RPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.RD",
"PerPkg": "1",
@@ -221,14 +262,17 @@
},
{
"BriefDescription": "PMM Commands : RPQ GNTs",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.RPQ_GNTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands : Underfill reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.UFILL_RD",
"PerPkg": "1",
@@ -238,14 +282,17 @@
},
{
"BriefDescription": "PMM Commands : Underfill GNTs",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.WPQ_GNTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands : Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xEA",
"EventName": "UNC_M_PMM_CMD1.WR",
"PerPkg": "1",
@@ -255,84 +302,105 @@
},
{
"BriefDescription": "PMM Commands - Part 2 : Expected No data packet (ERID matched NDP encoding)",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.NODATA_EXP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands - Part 2 : Unexpected No data packet (ERID matched a Read, but data was a NDP)",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.NODATA_UNEXP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands - Part 2 : Opportunistic Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.OPP_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands - Part 2 : ECC Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.PMM_ECC_ERROR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands - Part 2 : ERID detectable parity error",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.PMM_ERID_ERROR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands - Part 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.PMM_ERID_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands - Part 2 : Read Requests - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.REQS_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Commands - Part 2 : Read Requests - Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xEB",
"EventName": "UNC_M_PMM_CMD2.REQS_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Read Queue Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M_PMM_RPQ_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Read Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M_PMM_RPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Read Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M_PMM_RPQ_INSERTS",
"PerPkg": "1",
@@ -341,6 +409,7 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -350,8 +419,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x4",
@@ -359,8 +430,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x2",
@@ -368,34 +441,43 @@
},
{
"BriefDescription": "PMM Write Queue Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_M_PMM_WPQ_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Write Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PMM_WPQ_FLUSH",
+ "Counter": "0,1,2,3",
"EventCode": "0xe8",
"EventName": "UNC_M_PMM_WPQ_FLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PMM_WPQ_FLUSH_CYC",
+ "Counter": "0,1,2,3",
"EventCode": "0xe9",
"EventName": "UNC_M_PMM_WPQ_FLUSH_CYC",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Write Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xE7",
"EventName": "UNC_M_PMM_WPQ_INSERTS",
"PerPkg": "1",
@@ -404,6 +486,7 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -413,8 +496,10 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
"UMask": "0x2",
@@ -422,8 +507,10 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
"UMask": "0x4",
@@ -431,16 +518,20 @@
},
{
"BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Channel PPD Cycles : Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x1",
@@ -448,8 +539,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x2",
@@ -457,8 +550,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x4",
@@ -466,8 +561,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x8",
@@ -475,8 +572,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
"UMask": "0x1",
@@ -484,8 +583,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
@@ -493,16 +594,20 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Clock-Enabled Self-Refresh : Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
"Unit": "iMC"
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
"UMask": "0x1",
@@ -510,8 +615,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
@@ -519,6 +626,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.ALL",
"PerPkg": "1",
@@ -528,8 +636,10 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Precharge due to page miss : Counts the number of DRAM Precharge commands sent on this channel. : Pages Misses are due to precharges from bank scheduler (rd/wr requests)",
"UMask": "0xc",
@@ -537,6 +647,7 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.PGT",
"PerPkg": "1",
@@ -546,6 +657,7 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -555,6 +667,7 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
@@ -564,52 +677,66 @@
},
{
"BriefDescription": "Read Data Buffer Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M_RDB_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NOT_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M_RDB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M_RPQ_CYCLES_FULL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M_RPQ_CYCLES_FULL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
"UMask": "0x1",
@@ -617,8 +744,10 @@
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
"UMask": "0x2",
@@ -626,6 +755,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH0",
"PerPkg": "1",
@@ -635,6 +765,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH1",
"PerPkg": "1",
@@ -644,6 +775,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
"PerPkg": "1",
@@ -652,6 +784,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
"PerPkg": "1",
@@ -660,749 +793,930 @@
},
{
"BriefDescription": "Scoreboard Accesses : Scoreboard Accesses Accepted",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.FMRD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.FMWR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Write Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Write Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.NMRD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.NMWR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : FM read completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : FM write completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Read Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Read Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Scoreboard Accesses Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : NM read completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : NM write completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Alloc",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.ALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Dealloc",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_RD_STARVED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FMRD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FMTGRWR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_WR_STARVED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FMWR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.FM_RD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.FM_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.NM_RD_STARVED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.NMRD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.NM_WR_STARVED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.NMWR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Valid",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.NM_RD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.NM_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0xD9",
"EventName": "UNC_M_SB_CANARY.VLD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M_SB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Cycles Not-Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M_SB_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Block region reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Block region writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Persistent Mem reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Persistent Mem writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M_SB_INSERTS.WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Block region reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Block region writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Persistent Mem reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Persistent Mem writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : All",
+ "Counter": "0,1,2,3",
"EventCode": "0xDA",
"EventName": "UNC_M_SB_PREF_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : DDR4",
+ "Counter": "0,1,2,3",
"EventCode": "0xDA",
"EventName": "UNC_M_SB_PREF_INSERTS.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : Persistent Mem",
+ "Counter": "0,1,2,3",
"EventCode": "0xDA",
"EventName": "UNC_M_SB_PREF_INSERTS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : All",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : DDR4",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xdb",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.PMEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : Persistent Mem",
+ "Counter": "0,1,2,3",
"EventCode": "0xdb",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M_SB_REJECT.CANARY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M_SB_REJECT.DDR_EARLY_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : FM requests rejected due to full address conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : NM requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : Patrol requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FMRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FMTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_WR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.NM_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.NMRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.NM_WR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.NMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M_SB_STRV_ALLOC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FMRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FMTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xDE",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xDE",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xDE",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.NMRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.NMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xDE",
"EventName": "UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xDE",
"EventName": "UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FMRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_TGR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FMTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_WR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.NM_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.NMRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.NM_WR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.NMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write",
+ "Counter": "0,1,2,3",
"EventCode": "0xD8",
"EventName": "UNC_M_SB_STRV_OCC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.NEW",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.OCC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xDD",
"EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "2LM Tag Check : Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.HIT",
"PerPkg": "1",
@@ -1411,6 +1725,7 @@
},
{
"BriefDescription": "2LM Tag Check : Miss, no data in this line",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.MISS_CLEAN",
"PerPkg": "1",
@@ -1419,6 +1734,7 @@
},
{
"BriefDescription": "2LM Tag Check : Miss, existing data may be evicted to Far Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.MISS_DIRTY",
"PerPkg": "1",
@@ -1427,6 +1743,7 @@
},
{
"BriefDescription": "2LM Tag Check : Read Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.NM_RD_HIT",
"PerPkg": "1",
@@ -1435,6 +1752,7 @@
},
{
"BriefDescription": "2LM Tag Check : Write Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.NM_WR_HIT",
"PerPkg": "1",
@@ -1443,24 +1761,30 @@
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Full Cycles : Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M_WPQ_CYCLES_FULL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Full Cycles : Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Not Empty : Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
"UMask": "0x1",
@@ -1468,8 +1792,10 @@
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Not Empty : Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
"UMask": "0x2",
@@ -1477,6 +1803,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS.PCH0",
"PerPkg": "1",
@@ -1486,6 +1813,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS.PCH1",
"PerPkg": "1",
@@ -1495,6 +1823,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
"PerPkg": "1",
@@ -1503,6 +1832,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
"PerPkg": "1",
@@ -1511,8 +1841,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x1",
@@ -1520,8 +1852,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x2",
@@ -1529,8 +1863,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x1",
@@ -1538,8 +1874,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
index 920cab6ffe37..03984d61ab29 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Clockticks of the power control unit (PCU)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Clockticks of the power control unit (PCU) : The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
@@ -8,147 +9,185 @@
},
{
"BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 0 Cycles : Cycles spent in phase-shedding power state 0",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 1 Cycles : Cycles spent in phase-shedding power state 1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 2 Cycles : Cycles spent in phase-shedding power state 2",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 3 Cycles : Cycles spent in phase-shedding power state 3",
"Unit": "PCU"
},
{
"BriefDescription": "AVX256 Frequency Clipping",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "AVX512 Frequency Clipping",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.",
"Unit": "PCU"
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IO P Limit Strongest Lower Limit Cycles : Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Memory Phase Shedding Cycles : Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C0 : Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C3 : Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C-State : C0 and C1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C-State : C0 and C1 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0x40",
@@ -156,8 +195,10 @@
},
{
"BriefDescription": "Number of cores in C-State : C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C-State : C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0x80",
@@ -165,8 +206,10 @@
},
{
"BriefDescription": "Number of cores in C-State : C6 and C7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C-State : C6 and C7 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0xc0",
@@ -174,32 +217,40 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x0A",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x09",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total Core C State Transition Cycles : Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
"Unit": "PCU"
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
index e3227c7f2fe9..9df790d4361f 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index 46570b522095..563ec3f71c5a 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts the number of lines brought into the L1 data cache.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "L1D miss outstanding duration in cycles",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -35,6 +39,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.ALL",
"SampleAfterValue": "200003",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_E",
"PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_M",
"PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.)",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.MISS",
"PublicDescription": "Not rejected writebacks that missed LLC.",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "L2 cache lines filling L2.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"PublicDescription": "L2 cache lines in E state filling L2.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"PublicDescription": "L2 cache lines in I state filling L2.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"PublicDescription": "L2 cache lines in S state filling L2.",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"PublicDescription": "Clean L2 cache lines evicted by demand.",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"PublicDescription": "Dirty L2 cache lines evicted by demand.",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines filling the L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DIRTY_ALL",
"PublicDescription": "Dirty L2 cache lines filling the L2.",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by L2 prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PF_CLEAN",
"PublicDescription": "Clean L2 cache lines evicted by the MLC prefetcher.",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PF_DIRTY",
"PublicDescription": "Dirty L2 cache lines evicted by the MLC prefetcher.",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts all L2 code requests.",
@@ -155,6 +174,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
@@ -163,6 +183,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "Counts all L2 HW prefetcher requests.",
@@ -171,6 +192,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts all L2 store RFO requests.",
@@ -179,6 +201,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
@@ -187,6 +210,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Number of instruction fetches that missed the L2 cache.",
@@ -195,6 +219,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Demand Data Read requests that hit L2 cache.",
@@ -203,6 +228,7 @@
},
{
"BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_HIT",
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
@@ -211,6 +237,7 @@
},
{
"BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_MISS",
"PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
@@ -219,6 +246,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "RFO requests that hit L2 cache.",
@@ -227,6 +255,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
@@ -235,6 +264,7 @@
},
{
"BriefDescription": "RFOs that access cache lines in any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.ALL",
"PublicDescription": "RFOs that access cache lines in any state.",
@@ -243,6 +273,7 @@
},
{
"BriefDescription": "RFOs that hit cache lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
"PublicDescription": "RFOs that hit cache lines in M state.",
@@ -251,6 +282,7 @@
},
{
"BriefDescription": "RFOs that miss cache lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.MISS",
"PublicDescription": "RFOs that miss cache lines.",
@@ -259,6 +291,7 @@
},
{
"BriefDescription": "L2 or LLC HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_PF",
"PublicDescription": "Any MLC or LLC HW prefetch accessing L2, including rejects.",
@@ -267,6 +300,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"PublicDescription": "Transactions accessing L2 pipe.",
@@ -275,6 +309,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.CODE_RD",
"PublicDescription": "L2 cache accesses when fetching instructions.",
@@ -283,6 +318,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"PublicDescription": "Demand Data Read requests that access L2 cache.",
@@ -291,6 +327,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L1D_WB",
"PublicDescription": "L1D writebacks that access L2 cache.",
@@ -299,6 +336,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_FILL",
"PublicDescription": "L2 fill requests that access L2 cache.",
@@ -307,6 +345,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "L2 writebacks that access L2 cache.",
@@ -315,6 +354,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.RFO",
"PublicDescription": "RFO requests that access L2 cache.",
@@ -323,6 +363,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D is locked.",
@@ -331,6 +372,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
@@ -339,6 +381,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
@@ -347,6 +390,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"PEBS": "1",
@@ -355,6 +399,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"PEBS": "1",
@@ -363,6 +408,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
@@ -371,6 +417,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
@@ -379,6 +426,7 @@
},
{
"BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
"PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
@@ -387,6 +435,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -395,6 +444,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
@@ -403,6 +453,7 @@
},
{
"BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "1",
@@ -411,6 +462,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
@@ -419,6 +471,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache misses as data sources.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"PEBS": "1",
@@ -427,6 +480,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"PEBS": "1",
@@ -435,6 +489,7 @@
},
{
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
"PEBS": "1",
@@ -443,6 +498,7 @@
},
{
"BriefDescription": "All retired load uops. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
@@ -451,6 +507,7 @@
},
{
"BriefDescription": "All retired store uops. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
@@ -459,6 +516,7 @@
},
{
"BriefDescription": "Retired load uops with locked access. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
@@ -467,6 +525,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
@@ -475,6 +534,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
@@ -483,6 +543,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
@@ -491,6 +552,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
@@ -499,6 +561,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
@@ -507,6 +570,7 @@
},
{
"BriefDescription": "Cacheable and noncacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Demand code read requests sent to uncore.",
@@ -515,6 +579,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Demand data read requests sent to uncore.",
@@ -523,6 +588,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
@@ -531,6 +597,7 @@
},
{
"BriefDescription": "Cases when offcore requests buffer cannot take more entries for core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"PublicDescription": "Cases when offcore requests buffer cannot take more entries for core.",
@@ -539,6 +606,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
@@ -547,6 +615,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -556,6 +625,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
@@ -565,6 +635,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -574,6 +645,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -583,6 +655,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
@@ -591,6 +664,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "Offcore outstanding Demand Data Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
@@ -599,6 +673,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
@@ -608,6 +683,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
@@ -616,6 +692,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -625,6 +702,7 @@
},
{
"BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -634,6 +712,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -643,6 +722,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -652,6 +732,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -661,6 +742,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -670,6 +752,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -679,6 +762,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -688,6 +772,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -697,6 +782,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -706,6 +792,7 @@
},
{
"BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -715,6 +802,7 @@
},
{
"BriefDescription": "Counts all writebacks from the core to the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -724,6 +812,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -733,6 +822,7 @@
},
{
"BriefDescription": "Counts all demand code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -742,6 +832,7 @@
},
{
"BriefDescription": "Counts demand code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -751,6 +842,7 @@
},
{
"BriefDescription": "Counts all demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -760,6 +852,7 @@
},
{
"BriefDescription": "Counts all demand data reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -769,6 +862,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -778,6 +872,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -787,6 +882,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -796,6 +892,7 @@
},
{
"BriefDescription": "Counts all demand rfo's",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -805,6 +902,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -814,6 +912,7 @@
},
{
"BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -823,6 +922,7 @@
},
{
"BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -832,6 +932,7 @@
},
{
"BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses. It also includes L2 hints sent to LLC to keep a line from being evicted out of the core caches",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -841,6 +942,7 @@
},
{
"BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -850,6 +952,7 @@
},
{
"BriefDescription": "Counts non-temporal stores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -859,6 +962,7 @@
},
{
"BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/counter.json b/tools/perf/pmu-events/arch/x86/ivybridge/counter.json
new file mode 100644
index 000000000000..35bb154900d7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/counter.json
@@ -0,0 +1,17 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "ARB",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
index 89c6d47cc077..336fa00ad006 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"PublicDescription": "Number of SIMD FP assists due to input values.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"PublicDescription": "Number of SIMD FP assists due to output values.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "Number of X87 FP assists due to input values.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"PublicDescription": "Number of X87 FP assists due to output values.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
"PublicDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
"PublicDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
"PublicDescription": "Counts number of SSE* or AVX-128 double precision FP scalar uops executed.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
"PublicDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULs and IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"PublicDescription": "Counts number of X87 uops executed.",
@@ -82,6 +92,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_STORE",
"PublicDescription": "Number of assists associated with 256-bit AVX store operations.",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
@@ -111,6 +125,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
@@ -118,6 +133,7 @@
},
{
"BriefDescription": "number of AVX-256 Computational FP double precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_DOUBLE",
"PublicDescription": "Counts 256-bit packed double-precision floating-point instructions.",
@@ -126,6 +142,7 @@
},
{
"BriefDescription": "number of GSSE-256 Computational FP single precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_SINGLE",
"PublicDescription": "Counts 256-bit packed single-precision floating-point instructions.",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
index 4ee100024ca9..0d6c829a6023 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
"PublicDescription": "Number of DSB to MITE switches.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Cycles DSB to MITE switches caused delay.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "DSB_FILL.EXCEED_DSB_LINES",
"PublicDescription": "DSB Fill encountered > 3 DSB lines.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFETCH_STALL",
"PublicDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Instruction cache, streaming buffer and victim cache misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes UC accesses.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -102,6 +114,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
@@ -110,6 +123,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
"PublicDescription": "Counts cycles the IDQ is empty.",
@@ -118,6 +132,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"PublicDescription": "Number of uops delivered to IDQ from any path.",
@@ -126,6 +141,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -135,6 +151,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
@@ -143,6 +160,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -152,6 +170,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -161,6 +180,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -171,6 +191,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
@@ -179,6 +200,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -197,6 +220,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
@@ -205,6 +229,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Count issue pipeline slots where no uop was delivered from the front end to the back end when there is no back-end stall.",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -221,6 +247,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -238,6 +266,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -246,6 +275,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
index 5f3f0b5aebad..77d37db98b70 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -90,7 +90,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -100,7 +100,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -121,7 +121,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -151,7 +151,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS))) + 43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS)))) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -172,7 +172,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS))) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -181,7 +181,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
@@ -218,7 +218,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -227,7 +227,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(7 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
@@ -236,7 +236,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -246,7 +246,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -320,7 +320,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -340,7 +340,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFETCH_STALL / tma_info_thread_clks - tma_itlb_misses",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
@@ -447,12 +447,12 @@
"MetricThreshold": "tma_info_inst_mix_ipstore < 8"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -473,7 +473,7 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
@@ -485,7 +485,7 @@
"MetricName": "tma_info_memory_l1mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
@@ -497,7 +497,13 @@
"MetricName": "tma_info_memory_l2mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
@@ -549,7 +555,7 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
@@ -568,13 +574,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -669,7 +675,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -678,7 +684,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -696,7 +702,7 @@
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
@@ -716,7 +722,7 @@
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.LLC_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS))) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
@@ -765,7 +771,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -775,7 +781,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -784,7 +790,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
@@ -922,7 +928,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -930,7 +936,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -959,7 +965,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -987,7 +993,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
index fd1fe491c577..40f40384d58b 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Loads with latency value being above 128",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Loads with latency value being above 16",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
@@ -30,6 +33,7 @@
},
{
"BriefDescription": "Loads with latency value being above 256",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
@@ -41,6 +45,7 @@
},
{
"BriefDescription": "Loads with latency value being above 32",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
@@ -52,6 +57,7 @@
},
{
"BriefDescription": "Loads with latency value being above 4",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
@@ -63,6 +69,7 @@
},
{
"BriefDescription": "Loads with latency value being above 512",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
@@ -74,6 +81,7 @@
},
{
"BriefDescription": "Loads with latency value being above 64",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
@@ -85,6 +93,7 @@
},
{
"BriefDescription": "Loads with latency value being above 8",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
@@ -96,6 +105,7 @@
},
{
"BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
"PEBS": "2",
@@ -104,6 +114,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
@@ -112,6 +123,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"PublicDescription": "Speculative cache-line split Store-address uops dispatched to L1D.",
@@ -120,6 +132,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -129,6 +142,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that miss the LLC and the data returned from dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -138,6 +152,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -147,6 +162,7 @@
},
{
"BriefDescription": "Counts LLC replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -156,6 +172,7 @@
},
{
"BriefDescription": "Counts demand code reads that miss the LLC and the data returned from dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -165,6 +182,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -174,6 +192,7 @@
},
{
"BriefDescription": "Number of any page walk that had a miss in LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "PAGE_WALKS.LLC_MISS",
"SampleAfterValue": "100003",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json b/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json
index 8c808347f6da..4193c90c3459 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/other.json b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
index e80e99d064ba..2e796d533c13 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
index 30a3da9cd22b..da05eaaae22c 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Divide operations executed",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "Cycles when divider is busy executing divide operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"PublicDescription": "Cycles that the divider is active, includes INT and FP. Set 'edge =1, cmask=1' to count the number of divides.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"PublicDescription": "Speculative and retired macro-conditional branches.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"PublicDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"PublicDescription": "Speculative and retired direct near calls.",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "Speculative and retired indirect branches excluding calls and returns.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "Not taken macro-conditional branches.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "Taken speculative and retired macro-conditional branches.",
@@ -82,6 +92,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"PublicDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
@@ -90,6 +101,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"PublicDescription": "Taken speculative and retired direct near calls.",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"PublicDescription": "Taken speculative and retired indirect calls.",
@@ -114,6 +128,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"PublicDescription": "Taken speculative and retired indirect branches with return mnemonic.",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "Branch instructions at retirement.",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PublicDescription": "Number of far branches retired.",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
"PEBS": "1",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"PublicDescription": "Counts the number of not taken branch instructions retired.",
@@ -193,6 +217,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
@@ -201,6 +226,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"PublicDescription": "Speculative and retired mispredicted macro conditional branches.",
@@ -209,6 +235,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "Mispredicted indirect branches excluding calls and returns.",
@@ -217,6 +244,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -225,6 +253,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
@@ -233,6 +262,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "Taken speculative and retired mispredicted macro conditional branches.",
@@ -241,6 +271,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
@@ -249,6 +280,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"PublicDescription": "Taken speculative and retired mispredicted indirect calls.",
@@ -257,6 +289,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"PublicDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
@@ -265,6 +298,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "Mispredicted branch instructions at retirement.",
@@ -272,6 +306,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -280,6 +315,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -288,6 +324,7 @@
},
{
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -296,6 +333,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -303,6 +341,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
@@ -312,6 +351,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
@@ -319,6 +359,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -326,12 +367,14 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"UMask": "0x3"
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
@@ -341,6 +384,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
@@ -348,6 +392,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"UMask": "0x2"
@@ -355,6 +400,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"SampleAfterValue": "2000003",
@@ -362,6 +408,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
@@ -370,6 +417,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
@@ -377,6 +425,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -385,6 +434,7 @@
},
{
"BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -394,6 +444,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -402,6 +453,7 @@
},
{
"BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
@@ -411,6 +463,7 @@
},
{
"BriefDescription": "Cycles with pending memory loads.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
@@ -420,6 +473,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -428,6 +482,7 @@
},
{
"BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
@@ -437,6 +492,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -445,6 +501,7 @@
},
{
"BriefDescription": "Execution stalls due to L1 data cache misses",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -454,6 +511,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -462,6 +520,7 @@
},
{
"BriefDescription": "Execution stalls due to L2 cache misses.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
@@ -471,6 +530,7 @@
},
{
"BriefDescription": "Execution stalls due to memory subsystem.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
@@ -479,6 +539,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -487,6 +548,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -495,6 +557,7 @@
},
{
"BriefDescription": "Stall cycles because IQ is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"PublicDescription": "Stall cycles due to IQ is full.",
@@ -503,6 +566,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000003",
@@ -510,12 +574,14 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PublicDescription": "Number of instructions at retirement.",
@@ -523,6 +589,7 @@
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "2",
@@ -532,6 +599,7 @@
},
{
"BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -541,6 +609,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -549,6 +618,7 @@
},
{
"BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -558,6 +628,7 @@
},
{
"BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -566,6 +637,7 @@
},
{
"BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
@@ -574,6 +646,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "False dependencies in MOB due to partial compare on address.",
@@ -582,6 +655,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.HW_PF",
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
@@ -590,6 +664,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
@@ -598,6 +673,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -607,6 +683,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -616,6 +693,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
@@ -623,6 +701,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -632,6 +711,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"PublicDescription": "Counts the number of executed AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
@@ -640,6 +720,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Number of self-modifying-code machine clears detected.",
@@ -648,6 +729,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -655,6 +737,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -662,6 +745,7 @@
},
{
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
@@ -669,6 +753,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "Cycles Allocation is stalled due to Resource Related reason.",
@@ -677,6 +762,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
@@ -684,6 +770,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
@@ -691,6 +778,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Cycles stalled due to no store buffers available (not including draining form sync).",
@@ -699,6 +787,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "Count cases of saving new LBR records by hardware.",
@@ -707,6 +796,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Cycles the RS is empty for the thread.",
@@ -715,6 +805,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -725,6 +816,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "Cycles which a Uop is dispatched on port 0.",
@@ -734,6 +826,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"PublicDescription": "Cycles per core when uops are dispatched to port 0.",
@@ -742,6 +835,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "Cycles which a Uop is dispatched on port 1.",
@@ -751,6 +845,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"PublicDescription": "Cycles per core when uops are dispatched to port 1.",
@@ -759,6 +854,7 @@
},
{
"BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "Cycles which a Uop is dispatched on port 2.",
@@ -768,6 +864,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -775,6 +872,7 @@
},
{
"BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "Cycles which a Uop is dispatched on port 3.",
@@ -784,6 +882,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
@@ -792,6 +891,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "Cycles which a Uop is dispatched on port 4.",
@@ -801,6 +901,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"PublicDescription": "Cycles per core when uops are dispatched to port 4.",
@@ -809,6 +910,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "Cycles which a Uop is dispatched on port 5.",
@@ -818,6 +920,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"PublicDescription": "Cycles per core when uops are dispatched to port 5.",
@@ -826,6 +929,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
@@ -834,6 +938,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -843,6 +948,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -852,6 +958,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -861,6 +968,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -870,6 +978,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"Invert": "1",
@@ -879,6 +988,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
@@ -888,6 +998,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
@@ -897,6 +1008,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
@@ -906,6 +1018,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
@@ -915,6 +1028,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -924,6 +1038,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.THREAD",
"PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
@@ -932,6 +1047,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
@@ -941,6 +1057,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -951,6 +1068,7 @@
},
{
"BriefDescription": "Number of flags-merge uops being allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.FLAGS_MERGE",
"PublicDescription": "Number of flags-merge uops allocated. Such uops adds delay.",
@@ -959,6 +1077,7 @@
},
{
"BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SINGLE_MUL",
"PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
@@ -967,6 +1086,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
@@ -975,6 +1095,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -985,6 +1106,7 @@
},
{
"BriefDescription": "Retired uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -994,6 +1116,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
@@ -1003,6 +1126,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -1011,6 +1135,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -1020,6 +1145,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "10",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json
index be9a3ed1a940..8379dae91be4 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_ES",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_I",
"PerPkg": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_M",
"PerPkg": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_MESI",
"PerPkg": "1",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
"PerPkg": "1",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
"PerPkg": "1",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
"PerPkg": "1",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
"PerPkg": "1",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
"PerPkg": "1",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_I",
"PerPkg": "1",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
"PerPkg": "1",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
"PerPkg": "1",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EVICTION",
"PerPkg": "1",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "An external snoop hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EXTERNAL",
"PerPkg": "1",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
"PerPkg": "1",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EVICTION",
"PerPkg": "1",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "An external snoop hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EXTERNAL",
"PerPkg": "1",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
"PerPkg": "1",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
"PerPkg": "1",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "An external snoop misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EXTERNAL",
"PerPkg": "1",
@@ -193,6 +217,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json
index c3252c094a9c..ba340e858ed4 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles weighted by number of requests pending in Coherency Tracker.",
+ "Counter": "0",
"EventCode": "0x83",
"EventName": "UNC_ARB_COH_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of requests allocated in Coherency Tracker.",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts cycles weighted by the number of requests waiting for data returning from the memory controller. Accounts for coherent and non-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with at least half of the requests outstanding are waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "Counter": "0,1",
"CounterMask": "10",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_OVER_HALF_FULL",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "Counter": "0,1",
"CounterMask": "1",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts the number of LLC evictions allocated.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.EVICTIONS",
"PerPkg": "1",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Counts the number of allocated write entries, include full, partial, and LLC evictions.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
"PerPkg": "1",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Counter": "Fixed",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
index b97f15cb20fc..8c6128eff958 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Page walk for a large page completed for Demand load.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.LARGE_PAGE_WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts load operations that missed 1st level DTLB but hit the 2nd level.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Miss in all TLB levels causes a page walk that completes of any page size (4K/2M/4M/1G).",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"PublicDescription": "Cycles PMH is busy with this walk.",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
@@ -79,6 +89,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
@@ -87,6 +98,7 @@
},
{
"BriefDescription": "Completed page walks in ITLB due to STLB load misses for large pages",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.LARGE_PAGE_WALK_COMPLETED",
"PublicDescription": "Completed page walks in ITLB due to STLB load misses for large pages.",
@@ -95,6 +107,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in all ITLB levels that cause page walks.",
@@ -103,6 +116,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Number of cache load STLB hits. No page walk.",
@@ -111,6 +125,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Misses in all ITLB levels that cause completed page walks.",
@@ -119,6 +134,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
"PublicDescription": "Cycle PMH is busy with a walk.",
@@ -127,6 +143,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "DTLB flush attempts of the thread-specific entries.",
@@ -135,6 +152,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Count number of STLB flush attempts.",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/cache.json b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
index 0e8e77253978..4b2128f1a765 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts the number of lines brought into the L1 data cache.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "L1D miss outstanding duration in cycles",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -35,6 +39,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.ALL",
"SampleAfterValue": "200003",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_E",
"PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_M",
"PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.)",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.MISS",
"PublicDescription": "Not rejected writebacks that missed LLC.",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "L2 cache lines filling L2.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"PublicDescription": "L2 cache lines in E state filling L2.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"PublicDescription": "L2 cache lines in I state filling L2.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"PublicDescription": "L2 cache lines in S state filling L2.",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"PublicDescription": "Clean L2 cache lines evicted by demand.",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"PublicDescription": "Dirty L2 cache lines evicted by demand.",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines filling the L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DIRTY_ALL",
"PublicDescription": "Dirty L2 cache lines filling the L2.",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by L2 prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PF_CLEAN",
"PublicDescription": "Clean L2 cache lines evicted by the MLC prefetcher.",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PF_DIRTY",
"PublicDescription": "Dirty L2 cache lines evicted by the MLC prefetcher.",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts all L2 code requests.",
@@ -155,6 +174,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
@@ -163,6 +183,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "Counts all L2 HW prefetcher requests.",
@@ -171,6 +192,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts all L2 store RFO requests.",
@@ -179,6 +201,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
@@ -187,6 +210,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Number of instruction fetches that missed the L2 cache.",
@@ -195,6 +219,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Demand Data Read requests that hit L2 cache.",
@@ -203,6 +228,7 @@
},
{
"BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_HIT",
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
@@ -211,6 +237,7 @@
},
{
"BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_MISS",
"PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
@@ -219,6 +246,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "RFO requests that hit L2 cache.",
@@ -227,6 +255,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
@@ -235,6 +264,7 @@
},
{
"BriefDescription": "RFOs that access cache lines in any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.ALL",
"PublicDescription": "RFOs that access cache lines in any state.",
@@ -243,6 +273,7 @@
},
{
"BriefDescription": "RFOs that hit cache lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
"PublicDescription": "RFOs that hit cache lines in M state.",
@@ -251,6 +282,7 @@
},
{
"BriefDescription": "RFOs that miss cache lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.MISS",
"PublicDescription": "RFOs that miss cache lines.",
@@ -259,6 +291,7 @@
},
{
"BriefDescription": "L2 or LLC HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_PF",
"PublicDescription": "Any MLC or LLC HW prefetch accessing L2, including rejects.",
@@ -267,6 +300,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"PublicDescription": "Transactions accessing L2 pipe.",
@@ -275,6 +309,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.CODE_RD",
"PublicDescription": "L2 cache accesses when fetching instructions.",
@@ -283,6 +318,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"PublicDescription": "Demand Data Read requests that access L2 cache.",
@@ -291,6 +327,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L1D_WB",
"PublicDescription": "L1D writebacks that access L2 cache.",
@@ -299,6 +336,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_FILL",
"PublicDescription": "L2 fill requests that access L2 cache.",
@@ -307,6 +345,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "L2 writebacks that access L2 cache.",
@@ -315,6 +354,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.RFO",
"PublicDescription": "RFO requests that access L2 cache.",
@@ -323,6 +363,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D is locked.",
@@ -331,6 +372,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
@@ -339,6 +381,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
@@ -347,6 +390,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"PEBS": "1",
@@ -355,6 +399,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"PEBS": "1",
@@ -363,6 +408,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
@@ -371,6 +417,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
@@ -379,6 +426,7 @@
},
{
"BriefDescription": "Retired load uops whose data source was local DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
@@ -386,6 +434,7 @@
},
{
"BriefDescription": "Retired load uops whose data source was remote DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
"SampleAfterValue": "100007",
@@ -393,6 +442,7 @@
},
{
"BriefDescription": "Data forwarded from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD",
"SampleAfterValue": "100007",
@@ -400,6 +450,7 @@
},
{
"BriefDescription": "Remote cache HITM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM",
"SampleAfterValue": "100007",
@@ -407,6 +458,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -415,6 +467,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
@@ -423,6 +476,7 @@
},
{
"BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "1",
@@ -431,6 +485,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
@@ -439,6 +494,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache misses as data sources.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"PEBS": "1",
@@ -447,6 +503,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"PEBS": "1",
@@ -455,6 +512,7 @@
},
{
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
"PEBS": "1",
@@ -463,6 +521,7 @@
},
{
"BriefDescription": "All retired load uops. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
@@ -471,6 +530,7 @@
},
{
"BriefDescription": "All retired store uops. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
@@ -479,6 +539,7 @@
},
{
"BriefDescription": "Retired load uops with locked access. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
@@ -487,6 +548,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
@@ -495,6 +557,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
@@ -503,6 +566,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
@@ -511,6 +575,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
@@ -519,6 +584,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
@@ -527,6 +593,7 @@
},
{
"BriefDescription": "Cacheable and noncacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Demand code read requests sent to uncore.",
@@ -535,6 +602,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Demand data read requests sent to uncore.",
@@ -543,6 +611,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
@@ -551,6 +620,7 @@
},
{
"BriefDescription": "Cases when offcore requests buffer cannot take more entries for core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"PublicDescription": "Cases when offcore requests buffer cannot take more entries for core.",
@@ -559,6 +629,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
@@ -567,6 +638,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -576,6 +648,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
@@ -585,6 +658,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -594,6 +668,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -603,6 +678,7 @@
},
{
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
@@ -611,6 +687,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "Offcore outstanding Demand Data Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
@@ -619,6 +696,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
@@ -628,6 +706,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
@@ -636,6 +715,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -645,6 +725,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -654,6 +735,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -663,6 +745,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -672,6 +755,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -681,6 +765,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -690,6 +775,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -699,6 +785,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -708,6 +795,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -717,6 +805,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -726,6 +815,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -735,6 +825,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -744,6 +835,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -753,6 +845,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -762,6 +855,7 @@
},
{
"BriefDescription": "Counts all writebacks from the core to the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -771,6 +865,7 @@
},
{
"BriefDescription": "Counts all demand code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -780,6 +875,7 @@
},
{
"BriefDescription": "Counts all demand data reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -789,6 +885,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -798,6 +895,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -807,6 +905,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -816,6 +915,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -825,6 +925,7 @@
},
{
"BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -834,6 +935,7 @@
},
{
"BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
"MSRIndex": "0x1a6,0x1a7",
@@ -843,6 +945,7 @@
},
{
"BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
"MSRIndex": "0x1a6,0x1a7",
@@ -852,6 +955,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -861,6 +965,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -870,6 +975,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -879,6 +985,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -888,6 +995,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -897,6 +1005,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -906,6 +1015,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -915,6 +1025,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -924,6 +1035,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -933,6 +1045,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -942,6 +1055,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -951,6 +1065,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -960,6 +1075,7 @@
},
{
"BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -969,6 +1085,7 @@
},
{
"BriefDescription": "Counts non-temporal stores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -978,6 +1095,7 @@
},
{
"BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/counter.json b/tools/perf/pmu-events/arch/x86/ivytown/counter.json
new file mode 100644
index 000000000000..b4e46a693f7e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/counter.json
@@ -0,0 +1,52 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "HA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "QPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "R2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "R3QPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "3"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json b/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
index 89c6d47cc077..336fa00ad006 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"PublicDescription": "Number of SIMD FP assists due to input values.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"PublicDescription": "Number of SIMD FP assists due to output values.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "Number of X87 FP assists due to input values.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"PublicDescription": "Number of X87 FP assists due to output values.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
"PublicDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
"PublicDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
"PublicDescription": "Counts number of SSE* or AVX-128 double precision FP scalar uops executed.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
"PublicDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULs and IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"PublicDescription": "Counts number of X87 uops executed.",
@@ -82,6 +92,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_STORE",
"PublicDescription": "Number of assists associated with 256-bit AVX store operations.",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
@@ -111,6 +125,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
@@ -118,6 +133,7 @@
},
{
"BriefDescription": "number of AVX-256 Computational FP double precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_DOUBLE",
"PublicDescription": "Counts 256-bit packed double-precision floating-point instructions.",
@@ -126,6 +142,7 @@
},
{
"BriefDescription": "number of GSSE-256 Computational FP single precision uops issued this cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_SINGLE",
"PublicDescription": "Counts 256-bit packed single-precision floating-point instructions.",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
index 4ee100024ca9..0d6c829a6023 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
"PublicDescription": "Number of DSB to MITE switches.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Cycles DSB to MITE switches caused delay.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "DSB_FILL.EXCEED_DSB_LINES",
"PublicDescription": "DSB Fill encountered > 3 DSB lines.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.IFETCH_STALL",
"PublicDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Instruction cache, streaming buffer and victim cache misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes UC accesses.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -102,6 +114,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
@@ -110,6 +123,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
"PublicDescription": "Counts cycles the IDQ is empty.",
@@ -118,6 +132,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"PublicDescription": "Number of uops delivered to IDQ from any path.",
@@ -126,6 +141,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -135,6 +151,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
@@ -143,6 +160,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -152,6 +170,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -161,6 +180,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -171,6 +191,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
@@ -179,6 +200,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -197,6 +220,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
@@ -205,6 +229,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Count issue pipeline slots where no uop was delivered from the front end to the back end when there is no back-end stall.",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -221,6 +247,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -238,6 +266,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -246,6 +275,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
index e6f5b05a71b5..8fe0512c938f 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -90,7 +90,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -100,7 +100,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -121,7 +121,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -151,7 +151,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD)))) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -172,7 +172,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -181,7 +181,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
@@ -218,7 +218,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -227,7 +227,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(7 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
@@ -236,7 +236,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -246,7 +246,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -320,7 +320,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -340,7 +340,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFETCH_STALL / tma_info_thread_clks - tma_itlb_misses",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
@@ -447,12 +447,12 @@
"MetricThreshold": "tma_info_inst_mix_ipstore < 8"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -473,7 +473,7 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
@@ -485,7 +485,7 @@
"MetricName": "tma_info_memory_l1mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
@@ -497,7 +497,13 @@
"MetricName": "tma_info_memory_l2mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
@@ -549,7 +555,7 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
@@ -568,13 +574,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -689,7 +695,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -698,7 +704,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -716,7 +722,7 @@
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
@@ -736,7 +742,7 @@
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.LLC_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
@@ -794,7 +800,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -804,7 +810,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -813,7 +819,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
@@ -951,7 +957,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -978,7 +984,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -1007,7 +1013,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1035,7 +1041,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/memory.json b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
index 138d1aa0b32d..73b7e63e3b66 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Loads with latency value being above 128",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Loads with latency value being above 16",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
@@ -30,6 +33,7 @@
},
{
"BriefDescription": "Loads with latency value being above 256",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
@@ -41,6 +45,7 @@
},
{
"BriefDescription": "Loads with latency value being above 32",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
@@ -52,6 +57,7 @@
},
{
"BriefDescription": "Loads with latency value being above 4",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
@@ -63,6 +69,7 @@
},
{
"BriefDescription": "Loads with latency value being above 512",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
@@ -74,6 +81,7 @@
},
{
"BriefDescription": "Loads with latency value being above 64",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
@@ -85,6 +93,7 @@
},
{
"BriefDescription": "Loads with latency value being above 8",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
@@ -96,6 +105,7 @@
},
{
"BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
"PEBS": "2",
@@ -104,6 +114,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
@@ -112,6 +123,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"PublicDescription": "Speculative cache-line split Store-address uops dispatched to L1D.",
@@ -120,6 +132,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads that miss the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -129,6 +142,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -138,6 +152,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -147,6 +162,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that hits the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -156,6 +172,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -165,6 +182,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -174,6 +192,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -183,6 +202,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -192,6 +212,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -201,6 +222,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -210,6 +232,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -219,6 +242,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -228,6 +252,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -237,6 +262,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -246,6 +272,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -255,6 +282,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -264,6 +292,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -273,6 +302,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -282,6 +312,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -291,6 +322,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that miss the LLC and the data is found in M state in remote cache and forwarded from there.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -300,6 +332,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -309,6 +342,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -318,6 +352,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -327,6 +362,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -336,6 +372,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -345,6 +382,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -354,6 +392,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -363,6 +402,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -372,6 +412,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that miss in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json b/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
index 8c808347f6da..4193c90c3459 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/other.json b/tools/perf/pmu-events/arch/x86/ivytown/other.json
index e80e99d064ba..2e796d533c13 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
index 30a3da9cd22b..da05eaaae22c 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Divide operations executed",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "Cycles when divider is busy executing divide operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"PublicDescription": "Cycles that the divider is active, includes INT and FP. Set 'edge =1, cmask=1' to count the number of divides.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"PublicDescription": "Speculative and retired macro-conditional branches.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"PublicDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"PublicDescription": "Speculative and retired direct near calls.",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "Speculative and retired indirect branches excluding calls and returns.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "Not taken macro-conditional branches.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "Taken speculative and retired macro-conditional branches.",
@@ -82,6 +92,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"PublicDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
@@ -90,6 +101,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"PublicDescription": "Taken speculative and retired direct near calls.",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"PublicDescription": "Taken speculative and retired indirect calls.",
@@ -114,6 +128,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"PublicDescription": "Taken speculative and retired indirect branches with return mnemonic.",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "Branch instructions at retirement.",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PublicDescription": "Number of far branches retired.",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
"PEBS": "1",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"PublicDescription": "Counts the number of not taken branch instructions retired.",
@@ -193,6 +217,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
@@ -201,6 +226,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"PublicDescription": "Speculative and retired mispredicted macro conditional branches.",
@@ -209,6 +235,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "Mispredicted indirect branches excluding calls and returns.",
@@ -217,6 +244,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -225,6 +253,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
@@ -233,6 +262,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "Taken speculative and retired mispredicted macro conditional branches.",
@@ -241,6 +271,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
@@ -249,6 +280,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"PublicDescription": "Taken speculative and retired mispredicted indirect calls.",
@@ -257,6 +289,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"PublicDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
@@ -265,6 +298,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "Mispredicted branch instructions at retirement.",
@@ -272,6 +306,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -280,6 +315,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -288,6 +324,7 @@
},
{
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -296,6 +333,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -303,6 +341,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
@@ -312,6 +351,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
@@ -319,6 +359,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -326,12 +367,14 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"UMask": "0x3"
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
@@ -341,6 +384,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
@@ -348,6 +392,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"UMask": "0x2"
@@ -355,6 +400,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"SampleAfterValue": "2000003",
@@ -362,6 +408,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
@@ -370,6 +417,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
@@ -377,6 +425,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -385,6 +434,7 @@
},
{
"BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "Counter": "2",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -394,6 +444,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -402,6 +453,7 @@
},
{
"BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
@@ -411,6 +463,7 @@
},
{
"BriefDescription": "Cycles with pending memory loads.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
@@ -420,6 +473,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -428,6 +482,7 @@
},
{
"BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
@@ -437,6 +492,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -445,6 +501,7 @@
},
{
"BriefDescription": "Execution stalls due to L1 data cache misses",
+ "Counter": "2",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -454,6 +511,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -462,6 +520,7 @@
},
{
"BriefDescription": "Execution stalls due to L2 cache misses.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
@@ -471,6 +530,7 @@
},
{
"BriefDescription": "Execution stalls due to memory subsystem.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
@@ -479,6 +539,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -487,6 +548,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -495,6 +557,7 @@
},
{
"BriefDescription": "Stall cycles because IQ is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"PublicDescription": "Stall cycles due to IQ is full.",
@@ -503,6 +566,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000003",
@@ -510,12 +574,14 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PublicDescription": "Number of instructions at retirement.",
@@ -523,6 +589,7 @@
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "2",
@@ -532,6 +599,7 @@
},
{
"BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -541,6 +609,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -549,6 +618,7 @@
},
{
"BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -558,6 +628,7 @@
},
{
"BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -566,6 +637,7 @@
},
{
"BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
@@ -574,6 +646,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "False dependencies in MOB due to partial compare on address.",
@@ -582,6 +655,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.HW_PF",
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
@@ -590,6 +664,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
@@ -598,6 +673,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -607,6 +683,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -616,6 +693,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
@@ -623,6 +701,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -632,6 +711,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"PublicDescription": "Counts the number of executed AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
@@ -640,6 +720,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Number of self-modifying-code machine clears detected.",
@@ -648,6 +729,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -655,6 +737,7 @@
},
{
"BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
@@ -662,6 +745,7 @@
},
{
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
@@ -669,6 +753,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "Cycles Allocation is stalled due to Resource Related reason.",
@@ -677,6 +762,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
@@ -684,6 +770,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
@@ -691,6 +778,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Cycles stalled due to no store buffers available (not including draining form sync).",
@@ -699,6 +787,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "Count cases of saving new LBR records by hardware.",
@@ -707,6 +796,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Cycles the RS is empty for the thread.",
@@ -715,6 +805,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -725,6 +816,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "Cycles which a Uop is dispatched on port 0.",
@@ -734,6 +826,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"PublicDescription": "Cycles per core when uops are dispatched to port 0.",
@@ -742,6 +835,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "Cycles which a Uop is dispatched on port 1.",
@@ -751,6 +845,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"PublicDescription": "Cycles per core when uops are dispatched to port 1.",
@@ -759,6 +854,7 @@
},
{
"BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "Cycles which a Uop is dispatched on port 2.",
@@ -768,6 +864,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -775,6 +872,7 @@
},
{
"BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "Cycles which a Uop is dispatched on port 3.",
@@ -784,6 +882,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
@@ -792,6 +891,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "Cycles which a Uop is dispatched on port 4.",
@@ -801,6 +901,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"PublicDescription": "Cycles per core when uops are dispatched to port 4.",
@@ -809,6 +910,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "Cycles which a Uop is dispatched on port 5.",
@@ -818,6 +920,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"PublicDescription": "Cycles per core when uops are dispatched to port 5.",
@@ -826,6 +929,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
@@ -834,6 +938,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -843,6 +948,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -852,6 +958,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -861,6 +968,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -870,6 +978,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"Invert": "1",
@@ -879,6 +988,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
@@ -888,6 +998,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
@@ -897,6 +1008,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
@@ -906,6 +1018,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
@@ -915,6 +1028,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -924,6 +1038,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.THREAD",
"PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
@@ -932,6 +1047,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
@@ -941,6 +1057,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -951,6 +1068,7 @@
},
{
"BriefDescription": "Number of flags-merge uops being allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.FLAGS_MERGE",
"PublicDescription": "Number of flags-merge uops allocated. Such uops adds delay.",
@@ -959,6 +1077,7 @@
},
{
"BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SINGLE_MUL",
"PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
@@ -967,6 +1086,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
@@ -975,6 +1095,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -985,6 +1106,7 @@
},
{
"BriefDescription": "Retired uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -994,6 +1116,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
@@ -1003,6 +1126,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -1011,6 +1135,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -1020,6 +1145,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "10",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
index 8bf2706eb6d5..64442287ab66 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
@@ -1,12 +1,14 @@
[
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBOX"
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_C_COUNTER0_OCCUPANCY",
"PerPkg": "1",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Cache Lookups; Any Request",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
"PerPkg": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.NID",
"PerPkg": "1",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
"PerPkg": "1",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.WRITE",
"PerPkg": "1",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.E_STATE",
"PerPkg": "1",
@@ -69,6 +77,7 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.MISS",
"PerPkg": "1",
@@ -78,6 +87,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in M state",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.NID",
"PerPkg": "1",
@@ -96,6 +107,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.S_STATE",
"PerPkg": "1",
@@ -105,6 +117,7 @@
},
{
"BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -114,6 +127,7 @@
},
{
"BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RSPI_WAS_FSE",
"PerPkg": "1",
@@ -123,6 +137,7 @@
},
{
"BriefDescription": "Cbo Misc",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.STARTED",
"PerPkg": "1",
@@ -132,6 +147,7 @@
},
{
"BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.WC_ALIASING",
"PerPkg": "1",
@@ -141,6 +157,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 0",
+ "Counter": "0,1",
"EventCode": "0x3c",
"EventName": "UNC_C_QLRU.AGE0",
"PerPkg": "1",
@@ -150,6 +167,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 1",
+ "Counter": "0,1",
"EventCode": "0x3c",
"EventName": "UNC_C_QLRU.AGE1",
"PerPkg": "1",
@@ -159,6 +177,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 2",
+ "Counter": "0,1",
"EventCode": "0x3c",
"EventName": "UNC_C_QLRU.AGE2",
"PerPkg": "1",
@@ -168,6 +187,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Age 3",
+ "Counter": "0,1",
"EventCode": "0x3c",
"EventName": "UNC_C_QLRU.AGE3",
"PerPkg": "1",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "Counter": "0,1",
"EventCode": "0x3c",
"EventName": "UNC_C_QLRU.LRU_DECREMENT",
"PerPkg": "1",
@@ -186,6 +207,7 @@
},
{
"BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "Counter": "0,1",
"EventCode": "0x3c",
"EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
"PerPkg": "1",
@@ -195,6 +217,7 @@
},
{
"BriefDescription": "AD Ring In Use; Counterclockwise",
+ "Counter": "2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -204,6 +227,7 @@
},
{
"BriefDescription": "AD Ring In Use; Clockwise",
+ "Counter": "2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.CW",
"PerPkg": "1",
@@ -213,6 +237,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down",
+ "Counter": "2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.DOWN",
"PerPkg": "1",
@@ -222,6 +247,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Even on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.DOWN_VR0_EVEN",
"PerPkg": "1",
@@ -231,6 +257,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Odd on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.DOWN_VR0_ODD",
"PerPkg": "1",
@@ -240,6 +267,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Even on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.DOWN_VR1_EVEN",
"PerPkg": "1",
@@ -249,6 +277,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Odd on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.DOWN_VR1_ODD",
"PerPkg": "1",
@@ -258,6 +287,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up",
+ "Counter": "2,3",
"EventCode": "0x1B",
"EventName": "UNC_C_RING_AD_USED.UP",
"PerPkg": "1",
@@ -267,6 +297,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Even on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.UP_VR0_EVEN",
"PerPkg": "1",
@@ -276,6 +307,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Odd on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.UP_VR0_ODD",
"PerPkg": "1",
@@ -285,6 +317,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Even on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.UP_VR1_EVEN",
"PerPkg": "1",
@@ -294,6 +327,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Odd on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.UP_VR1_ODD",
"PerPkg": "1",
@@ -303,6 +337,7 @@
},
{
"BriefDescription": "AK Ring In Use; Counterclockwise",
+ "Counter": "2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -312,6 +347,7 @@
},
{
"BriefDescription": "AK Ring In Use; Clockwise",
+ "Counter": "2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.CW",
"PerPkg": "1",
@@ -321,6 +357,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down",
+ "Counter": "2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.DOWN",
"PerPkg": "1",
@@ -330,6 +367,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Even on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.DOWN_VR0_EVEN",
"PerPkg": "1",
@@ -339,6 +377,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Odd on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.DOWN_VR0_ODD",
"PerPkg": "1",
@@ -348,6 +387,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Even on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.DOWN_VR1_EVEN",
"PerPkg": "1",
@@ -357,6 +397,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Odd on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.DOWN_VR1_ODD",
"PerPkg": "1",
@@ -366,6 +407,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up",
+ "Counter": "2,3",
"EventCode": "0x1C",
"EventName": "UNC_C_RING_AK_USED.UP",
"PerPkg": "1",
@@ -375,6 +417,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Even on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.UP_VR0_EVEN",
"PerPkg": "1",
@@ -384,6 +427,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Odd on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.UP_VR0_ODD",
"PerPkg": "1",
@@ -393,6 +437,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Even on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.UP_VR1_EVEN",
"PerPkg": "1",
@@ -402,6 +447,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Odd on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.UP_VR1_ODD",
"PerPkg": "1",
@@ -411,6 +457,7 @@
},
{
"BriefDescription": "BL Ring in Use; Counterclockwise",
+ "Counter": "2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -420,6 +467,7 @@
},
{
"BriefDescription": "BL Ring in Use; Clockwise",
+ "Counter": "2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.CW",
"PerPkg": "1",
@@ -429,6 +477,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down",
+ "Counter": "2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.DOWN",
"PerPkg": "1",
@@ -438,6 +487,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Even on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.DOWN_VR0_EVEN",
"PerPkg": "1",
@@ -447,6 +497,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Odd on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.DOWN_VR0_ODD",
"PerPkg": "1",
@@ -456,6 +507,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Even on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.DOWN_VR1_EVEN",
"PerPkg": "1",
@@ -465,6 +517,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Odd on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.DOWN_VR1_ODD",
"PerPkg": "1",
@@ -474,6 +527,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up",
+ "Counter": "2,3",
"EventCode": "0x1D",
"EventName": "UNC_C_RING_BL_USED.UP",
"PerPkg": "1",
@@ -483,6 +537,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Even on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.UP_VR0_EVEN",
"PerPkg": "1",
@@ -492,6 +547,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Odd on Vring 0",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.UP_VR0_ODD",
"PerPkg": "1",
@@ -501,6 +557,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Even on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.UP_VR1_EVEN",
"PerPkg": "1",
@@ -510,6 +567,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Odd on VRing 1",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.UP_VR1_ODD",
"PerPkg": "1",
@@ -519,6 +577,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AD_IRQ",
"PerPkg": "1",
@@ -527,6 +586,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AK",
"PerPkg": "1",
@@ -535,6 +595,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.: Acknowledgements to core",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AK_CORE",
"PerPkg": "1",
@@ -543,6 +604,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.BL",
"PerPkg": "1",
@@ -551,6 +613,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.: Data Responses to core",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.BL_CORE",
"PerPkg": "1",
@@ -559,6 +622,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.IV",
"PerPkg": "1",
@@ -567,6 +631,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.: Snoops of processor's cache.",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.IV_CORE",
"PerPkg": "1",
@@ -575,6 +640,7 @@
},
{
"BriefDescription": "IV Ring in Use; Any",
+ "Counter": "2,3",
"EventCode": "0x1e",
"EventName": "UNC_C_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -584,6 +650,7 @@
},
{
"BriefDescription": "IV Ring in Use; Down",
+ "Counter": "2,3",
"EventCode": "0x1e",
"EventName": "UNC_C_RING_IV_USED.DOWN",
"PerPkg": "1",
@@ -593,6 +660,7 @@
},
{
"BriefDescription": "IV Ring in Use; Up",
+ "Counter": "2,3",
"EventCode": "0x1e",
"EventName": "UNC_C_RING_IV_USED.UP",
"PerPkg": "1",
@@ -601,6 +669,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AD_IPQ",
"PerPkg": "1",
@@ -608,6 +677,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AD_IRQ",
"PerPkg": "1",
@@ -615,6 +685,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.IV",
"PerPkg": "1",
@@ -622,6 +693,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_C_RING_SRC_THRTL",
"PerPkg": "1",
@@ -629,6 +701,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
"PerPkg": "1",
@@ -638,6 +711,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
"PerPkg": "1",
@@ -647,6 +721,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
"PerPkg": "1",
@@ -656,6 +731,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
"PerPkg": "1",
@@ -665,6 +741,7 @@
},
{
"BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IPQ",
"PerPkg": "1",
@@ -674,6 +751,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ",
"PerPkg": "1",
@@ -683,6 +761,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
"PerPkg": "1",
@@ -692,6 +771,7 @@
},
{
"BriefDescription": "Ingress Allocations: IRQ Rejected",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ_REJECTED",
"PerPkg": "1",
@@ -701,6 +781,7 @@
},
{
"BriefDescription": "Ingress Allocations; VFIFO",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.VFIFO",
"PerPkg": "1",
@@ -710,6 +791,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IPQ",
"PerPkg": "1",
@@ -719,6 +801,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IRQ",
"PerPkg": "1",
@@ -728,6 +811,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
"PerPkg": "1",
@@ -737,6 +821,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -746,6 +831,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
"PerPkg": "1",
@@ -755,6 +841,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
"PerPkg": "1",
@@ -764,6 +851,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -773,6 +861,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -782,6 +871,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
"PerPkg": "1",
@@ -791,6 +881,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
"PerPkg": "1",
@@ -800,6 +891,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -809,6 +901,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -818,6 +911,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
"PerPkg": "1",
@@ -827,6 +921,7 @@
},
{
"BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
"PerPkg": "1",
@@ -836,6 +931,7 @@
},
{
"BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
"PerPkg": "1",
@@ -845,6 +941,7 @@
},
{
"BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -854,6 +951,7 @@
},
{
"BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -863,6 +961,7 @@
},
{
"BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
"PerPkg": "1",
@@ -872,6 +971,7 @@
},
{
"BriefDescription": "ISMQ Retries; No WB Credits",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
"PerPkg": "1",
@@ -881,6 +981,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
"PerPkg": "1",
@@ -890,6 +991,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -899,6 +1001,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
"PerPkg": "1",
@@ -908,6 +1011,7 @@
},
{
"BriefDescription": "IRQ Rejected",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJECTED",
"PerPkg": "1",
@@ -917,6 +1021,7 @@
},
{
"BriefDescription": "Ingress Occupancy; VFIFO",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.VFIFO",
"PerPkg": "1",
@@ -926,6 +1031,7 @@
},
{
"BriefDescription": "TOR Inserts; All",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
@@ -935,6 +1041,7 @@
},
{
"BriefDescription": "TOR Inserts; Evictions",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
@@ -944,6 +1051,7 @@
},
{
"BriefDescription": "TOR Inserts; Local Memory",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOCAL",
"PerPkg": "1",
@@ -953,6 +1061,7 @@
},
{
"BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
"PerPkg": "1",
@@ -962,6 +1071,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Local Memory",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
"PerPkg": "1",
@@ -971,6 +1081,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
"PerPkg": "1",
@@ -980,6 +1091,7 @@
},
{
"BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
@@ -989,6 +1101,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Remote Memory",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
"PerPkg": "1",
@@ -998,6 +1111,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
"PerPkg": "1",
@@ -1007,6 +1121,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
@@ -1016,6 +1131,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Evictions",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
@@ -1025,6 +1141,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Miss All",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
@@ -1034,6 +1151,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -1043,6 +1161,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
@@ -1052,6 +1171,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Writebacks",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
@@ -1061,6 +1181,7 @@
},
{
"BriefDescription": "TOR Inserts; Opcode Match",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
@@ -1070,6 +1191,7 @@
},
{
"BriefDescription": "TOR Inserts; Remote Memory",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REMOTE",
"PerPkg": "1",
@@ -1079,6 +1201,7 @@
},
{
"BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
"PerPkg": "1",
@@ -1088,6 +1211,7 @@
},
{
"BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WB",
"PerPkg": "1",
@@ -1097,6 +1221,7 @@
},
{
"BriefDescription": "TOR Occupancy; Any",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -1106,6 +1231,7 @@
},
{
"BriefDescription": "TOR Occupancy; Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
"PerPkg": "1",
@@ -1115,6 +1241,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -1124,6 +1251,7 @@
},
{
"BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
"PerPkg": "1",
@@ -1133,6 +1261,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss All",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
"PerPkg": "1",
@@ -1142,6 +1271,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
"PerPkg": "1",
@@ -1151,6 +1281,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
"PerPkg": "1",
@@ -1160,6 +1291,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
@@ -1169,6 +1301,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
"PerPkg": "1",
@@ -1178,6 +1311,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
"PerPkg": "1",
@@ -1187,6 +1321,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
"PerPkg": "1",
@@ -1196,6 +1331,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
"PerPkg": "1",
@@ -1205,6 +1341,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
"PerPkg": "1",
@@ -1214,6 +1351,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -1223,6 +1361,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
"PerPkg": "1",
@@ -1232,6 +1371,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
"PerPkg": "1",
@@ -1241,6 +1381,7 @@
},
{
"BriefDescription": "TOR Occupancy; Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
"PerPkg": "1",
@@ -1250,6 +1391,7 @@
},
{
"BriefDescription": "TOR Occupancy",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -1259,6 +1401,7 @@
},
{
"BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
"PerPkg": "1",
@@ -1268,6 +1411,7 @@
},
{
"BriefDescription": "TOR Occupancy; Writebacks",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.WB",
"PerPkg": "1",
@@ -1277,6 +1421,7 @@
},
{
"BriefDescription": "Onto AD Ring",
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.AD",
"PerPkg": "1",
@@ -1285,6 +1430,7 @@
},
{
"BriefDescription": "Onto AK Ring",
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.AK",
"PerPkg": "1",
@@ -1293,6 +1439,7 @@
},
{
"BriefDescription": "Onto BL Ring",
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED.BL",
"PerPkg": "1",
@@ -1301,6 +1448,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
"PerPkg": "1",
@@ -1310,6 +1458,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CORE",
"PerPkg": "1",
@@ -1319,6 +1468,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
"PerPkg": "1",
@@ -1328,6 +1478,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CORE",
"PerPkg": "1",
@@ -1337,6 +1488,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
"PerPkg": "1",
@@ -1346,6 +1498,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CORE",
"PerPkg": "1",
@@ -1355,6 +1508,7 @@
},
{
"BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
"PerPkg": "1",
@@ -1364,6 +1518,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AD_CORE",
"PerPkg": "1",
@@ -1373,6 +1528,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AK_BOTH",
"PerPkg": "1",
@@ -1382,6 +1538,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.IV",
"PerPkg": "1",
@@ -1391,6 +1548,7 @@
},
{
"BriefDescription": "BT Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x52",
"EventName": "UNC_H_BT_BYPASS",
"PerPkg": "1",
@@ -1399,6 +1557,7 @@
},
{
"BriefDescription": "BT Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_H_BT_CYCLES_NE",
"PerPkg": "1",
@@ -1407,6 +1566,7 @@
},
{
"BriefDescription": "BT Cycles Not Empty: Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_H_BT_CYCLES_NE.LOCAL",
"PerPkg": "1",
@@ -1416,6 +1576,7 @@
},
{
"BriefDescription": "BT Cycles Not Empty: Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_H_BT_CYCLES_NE.REMOTE",
"PerPkg": "1",
@@ -1425,6 +1586,7 @@
},
{
"BriefDescription": "BT Occupancy; Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_H_BT_OCCUPANCY.LOCAL",
"PerPkg": "1",
@@ -1434,6 +1596,7 @@
},
{
"BriefDescription": "BT Occupancy; Reads Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_H_BT_OCCUPANCY.READS_LOCAL",
"PerPkg": "1",
@@ -1443,6 +1606,7 @@
},
{
"BriefDescription": "BT Occupancy; Reads Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_H_BT_OCCUPANCY.READS_REMOTE",
"PerPkg": "1",
@@ -1452,6 +1616,7 @@
},
{
"BriefDescription": "BT Occupancy; Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_H_BT_OCCUPANCY.REMOTE",
"PerPkg": "1",
@@ -1461,6 +1626,7 @@
},
{
"BriefDescription": "BT Occupancy; Writes Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_H_BT_OCCUPANCY.WRITES_LOCAL",
"PerPkg": "1",
@@ -1470,6 +1636,7 @@
},
{
"BriefDescription": "BT Occupancy; Writes Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_H_BT_OCCUPANCY.WRITES_REMOTE",
"PerPkg": "1",
@@ -1479,6 +1646,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
"PerPkg": "1",
@@ -1488,6 +1656,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
"PerPkg": "1",
@@ -1497,6 +1666,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
"PerPkg": "1",
@@ -1506,6 +1676,7 @@
},
{
"BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
"PerPkg": "1",
@@ -1515,6 +1686,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
"PerPkg": "1",
@@ -1524,6 +1696,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.TAKEN",
"PerPkg": "1",
@@ -1533,6 +1706,7 @@
},
{
"BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_H_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
@@ -1540,6 +1714,7 @@
},
{
"BriefDescription": "Conflict Checks; Acknowledge Conflicts",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_H_CONFLICT_CYCLES.ACKCNFLTS",
"PerPkg": "1",
@@ -1549,6 +1724,7 @@
},
{
"BriefDescription": "Conflict Checks; Cmp Fwds",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_H_CONFLICT_CYCLES.CMP_FWDS",
"PerPkg": "1",
@@ -1558,6 +1734,7 @@
},
{
"BriefDescription": "Conflict Checks; Conflict Detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_H_CONFLICT_CYCLES.CONFLICT",
"PerPkg": "1",
@@ -1567,6 +1744,7 @@
},
{
"BriefDescription": "Conflict Checks; Last in conflict chain",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_H_CONFLICT_CYCLES.LAST",
"PerPkg": "1",
@@ -1576,6 +1754,7 @@
},
{
"BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_H_DIRECT2CORE_COUNT",
"PerPkg": "1",
@@ -1584,6 +1763,7 @@
},
{
"BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
"PerPkg": "1",
@@ -1592,6 +1772,7 @@
},
{
"BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
"PerPkg": "1",
@@ -1600,6 +1781,7 @@
},
{
"BriefDescription": "Directory Lat Opt Return",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_H_DIRECTORY_LAT_OPT",
"PerPkg": "1",
@@ -1608,6 +1790,7 @@
},
{
"BriefDescription": "Directory Lookups: Any state",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -1617,6 +1800,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
"PerPkg": "1",
@@ -1626,6 +1810,7 @@
},
{
"BriefDescription": "Directory Lookups: Snoop A",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.SNOOP_A",
"PerPkg": "1",
@@ -1635,6 +1820,7 @@
},
{
"BriefDescription": "Directory Lookups: Snoop S",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.SNOOP_S",
"PerPkg": "1",
@@ -1644,6 +1830,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
"PerPkg": "1",
@@ -1653,6 +1840,7 @@
},
{
"BriefDescription": "Directory Lookups: A State",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -1662,6 +1850,7 @@
},
{
"BriefDescription": "Directory Lookups: I State",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -1671,6 +1860,7 @@
},
{
"BriefDescription": "Directory Lookups: S State",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -1680,6 +1870,7 @@
},
{
"BriefDescription": "Directory Updates: A2I",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.A2I",
"PerPkg": "1",
@@ -1689,6 +1880,7 @@
},
{
"BriefDescription": "Directory Updates: A2S",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.A2S",
"PerPkg": "1",
@@ -1698,6 +1890,7 @@
},
{
"BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -1707,6 +1900,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
"PerPkg": "1",
@@ -1716,6 +1910,7 @@
},
{
"BriefDescription": "Directory Updates: I2A",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.I2A",
"PerPkg": "1",
@@ -1725,6 +1920,7 @@
},
{
"BriefDescription": "Directory Updates: I2S",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.I2S",
"PerPkg": "1",
@@ -1734,6 +1930,7 @@
},
{
"BriefDescription": "Directory Updates: S2A",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.S2A",
"PerPkg": "1",
@@ -1743,6 +1940,7 @@
},
{
"BriefDescription": "Directory Updates: S2I",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.S2I",
"PerPkg": "1",
@@ -1752,6 +1950,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_H_DIRECTORY_UPDATE.SET",
"PerPkg": "1",
@@ -1761,6 +1960,7 @@
},
{
"BriefDescription": "AD QPI Link 2 Credit Accumulator",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_H_IGR_AD_QPI2_ACCUMULATOR",
"PerPkg": "1",
@@ -1769,6 +1969,7 @@
},
{
"BriefDescription": "BL QPI Link 2 Credit Accumulator",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_H_IGR_BL_QPI2_ACCUMULATOR",
"PerPkg": "1",
@@ -1777,6 +1978,7 @@
},
{
"BriefDescription": "AD QPI Link 2 Credit Accumulator",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_H_IGR_CREDITS_AD_QPI2",
"PerPkg": "1",
@@ -1785,6 +1987,7 @@
},
{
"BriefDescription": "BL QPI Link 2 Credit Accumulator",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_H_IGR_CREDITS_BL_QPI2",
"PerPkg": "1",
@@ -1793,6 +1996,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
"PerPkg": "1",
@@ -1802,6 +2006,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
"PerPkg": "1",
@@ -1811,6 +2016,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
"PerPkg": "1",
@@ -1820,6 +2026,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
"PerPkg": "1",
@@ -1829,6 +2036,7 @@
},
{
"BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_H_IMC_READS.NORMAL",
"PerPkg": "1",
@@ -1838,6 +2046,7 @@
},
{
"BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_H_IMC_RETRY",
"PerPkg": "1",
@@ -1845,6 +2054,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -1854,6 +2064,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.FULL",
"PerPkg": "1",
@@ -1863,6 +2074,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
"PerPkg": "1",
@@ -1872,6 +2084,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -1881,6 +2094,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
"PerPkg": "1",
@@ -1890,6 +2104,7 @@
},
{
"BriefDescription": "IODC Conflicts; Any Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_H_IODC_CONFLICTS.ANY",
"PerPkg": "1",
@@ -1898,6 +2113,7 @@
},
{
"BriefDescription": "IODC Conflicts; Last Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_H_IODC_CONFLICTS.LAST",
"PerPkg": "1",
@@ -1906,6 +2122,7 @@
},
{
"BriefDescription": "IODC Conflicts: Remote InvItoE - Same RTID",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_H_IODC_CONFLICTS.REMOTE_INVI2E_SAME_RTID",
"PerPkg": "1",
@@ -1914,6 +2131,7 @@
},
{
"BriefDescription": "IODC Conflicts: Remote (Other) - Same Addr",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_H_IODC_CONFLICTS.REMOTE_OTHER_SAME_ADDR",
"PerPkg": "1",
@@ -1922,6 +2140,7 @@
},
{
"BriefDescription": "IODC Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_H_IODC_INSERTS",
"PerPkg": "1",
@@ -1930,6 +2149,7 @@
},
{
"BriefDescription": "Num IODC 0 Length Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_H_IODC_OLEN_WBMTOI",
"PerPkg": "1",
@@ -1938,6 +2158,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.INVITOE_LOCAL",
"PerPkg": "1",
@@ -1947,6 +2168,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.READS_LOCAL",
"PerPkg": "1",
@@ -1956,6 +2178,7 @@
},
{
"BriefDescription": "OSB Snoop Broadcast; Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_H_OSB.REMOTE",
"PerPkg": "1",
@@ -1965,6 +2188,7 @@
},
{
"BriefDescription": "OSB Early Data Return; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.ALL",
"PerPkg": "1",
@@ -1974,6 +2198,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
"PerPkg": "1",
@@ -1983,6 +2208,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
"PerPkg": "1",
@@ -1992,6 +2218,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
"PerPkg": "1",
@@ -2001,6 +2228,7 @@
},
{
"BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
"PerPkg": "1",
@@ -2010,6 +2238,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -2019,6 +2248,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -2028,6 +2258,7 @@
},
{
"BriefDescription": "Read and Write Requests; Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
@@ -2037,6 +2268,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -2046,6 +2278,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -2055,6 +2288,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
@@ -2064,6 +2298,7 @@
},
{
"BriefDescription": "Read and Write Requests; Local Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -2073,6 +2308,7 @@
},
{
"BriefDescription": "Read and Write Requests; Remote Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -2082,6 +2318,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -2091,6 +2328,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -2100,6 +2338,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -2109,6 +2348,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CCW_VR1_EVEN",
"PerPkg": "1",
@@ -2118,6 +2358,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CCW_VR1_ODD",
"PerPkg": "1",
@@ -2127,6 +2368,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3E",
"EventName": "UNC_H_RING_AD_USED.CW",
"PerPkg": "1",
@@ -2136,6 +2378,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -2145,6 +2388,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -2154,6 +2398,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CW_VR1_EVEN",
"PerPkg": "1",
@@ -2163,6 +2408,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CW_VR1_ODD",
"PerPkg": "1",
@@ -2172,6 +2418,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -2181,6 +2428,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -2190,6 +2438,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -2199,6 +2448,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CCW_VR1_EVEN",
"PerPkg": "1",
@@ -2208,6 +2458,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CCW_VR1_ODD",
"PerPkg": "1",
@@ -2217,6 +2468,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x3F",
"EventName": "UNC_H_RING_AK_USED.CW",
"PerPkg": "1",
@@ -2226,6 +2478,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -2235,6 +2488,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -2244,6 +2498,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CW_VR1_EVEN",
"PerPkg": "1",
@@ -2253,6 +2508,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CW_VR1_ODD",
"PerPkg": "1",
@@ -2262,6 +2518,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -2271,6 +2528,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -2280,6 +2538,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -2289,6 +2548,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_VR1_EVEN",
"PerPkg": "1",
@@ -2298,6 +2558,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_VR1_ODD",
"PerPkg": "1",
@@ -2307,6 +2568,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW",
"PerPkg": "1",
@@ -2316,6 +2578,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -2325,6 +2588,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -2334,6 +2598,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_VR1_EVEN",
"PerPkg": "1",
@@ -2343,6 +2608,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_VR1_ODD",
"PerPkg": "1",
@@ -2352,6 +2618,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -2361,6 +2628,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -2370,6 +2638,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -2379,6 +2648,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -2388,6 +2658,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -2397,6 +2668,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -2406,6 +2678,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -2415,6 +2688,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
@@ -2424,6 +2698,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
"PerPkg": "1",
@@ -2433,6 +2708,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
@@ -2442,6 +2718,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
@@ -2451,6 +2728,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
@@ -2460,6 +2738,7 @@
},
{
"BriefDescription": "Snoop Responses Received; RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
@@ -2469,6 +2748,7 @@
},
{
"BriefDescription": "Snoop Responses Received; Rsp*Fwd*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
@@ -2478,6 +2758,7 @@
},
{
"BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSP_WB",
"PerPkg": "1",
@@ -2487,6 +2768,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Other",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
"PerPkg": "1",
@@ -2496,6 +2778,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
"PerPkg": "1",
@@ -2505,6 +2788,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
"PerPkg": "1",
@@ -2514,6 +2798,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
"PerPkg": "1",
@@ -2523,6 +2808,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
"PerPkg": "1",
@@ -2532,6 +2818,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
"PerPkg": "1",
@@ -2541,6 +2828,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
"PerPkg": "1",
@@ -2550,6 +2838,7 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
"PerPkg": "1",
@@ -2559,6 +2848,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
"PerPkg": "1",
@@ -2568,6 +2858,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
"PerPkg": "1",
@@ -2577,6 +2868,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
"PerPkg": "1",
@@ -2586,6 +2878,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
"PerPkg": "1",
@@ -2595,6 +2888,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
"PerPkg": "1",
@@ -2604,6 +2898,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
"PerPkg": "1",
@@ -2613,6 +2908,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
"PerPkg": "1",
@@ -2622,6 +2918,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
"PerPkg": "1",
@@ -2631,6 +2928,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
"PerPkg": "1",
@@ -2640,6 +2938,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
"PerPkg": "1",
@@ -2649,6 +2948,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
"PerPkg": "1",
@@ -2658,6 +2958,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
"PerPkg": "1",
@@ -2667,6 +2968,7 @@
},
{
"BriefDescription": "Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_H_TRACKER_CYCLES_NE",
"PerPkg": "1",
@@ -2675,6 +2977,7 @@
},
{
"BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_H_TxR_AD.HOM",
"PerPkg": "1",
@@ -2684,6 +2987,7 @@
},
{
"BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -2693,6 +2997,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -2702,6 +3007,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -2711,6 +3017,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -2720,6 +3027,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -2729,6 +3037,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -2738,6 +3047,7 @@
},
{
"BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.ALL",
"PerPkg": "1",
@@ -2747,6 +3057,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
"PerPkg": "1",
@@ -2756,6 +3067,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
"PerPkg": "1",
@@ -2765,6 +3077,7 @@
},
{
"BriefDescription": "AD Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED0",
"PerPkg": "1",
@@ -2774,6 +3087,7 @@
},
{
"BriefDescription": "AD Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED1",
"PerPkg": "1",
@@ -2783,6 +3097,7 @@
},
{
"BriefDescription": "Outbound Ring Transactions on AK: CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_H_TxR_AK.CRD_CBO",
"PerPkg": "1",
@@ -2791,6 +3106,7 @@
},
{
"BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -2800,6 +3116,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -2809,6 +3126,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -2818,6 +3136,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -2827,6 +3146,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -2836,6 +3156,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -2845,6 +3166,7 @@
},
{
"BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_H_TxR_AK_INSERTS.ALL",
"PerPkg": "1",
@@ -2854,6 +3176,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
"PerPkg": "1",
@@ -2863,6 +3186,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
"PerPkg": "1",
@@ -2872,6 +3196,7 @@
},
{
"BriefDescription": "AK Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED0",
"PerPkg": "1",
@@ -2881,6 +3206,7 @@
},
{
"BriefDescription": "AK Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED1",
"PerPkg": "1",
@@ -2890,6 +3216,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CACHE",
"PerPkg": "1",
@@ -2899,6 +3226,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CORE",
"PerPkg": "1",
@@ -2908,6 +3236,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_QPI",
"PerPkg": "1",
@@ -2917,6 +3246,7 @@
},
{
"BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -2926,6 +3256,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -2935,6 +3266,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -2944,6 +3276,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -2953,6 +3286,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -2962,6 +3296,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -2971,6 +3306,7 @@
},
{
"BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.ALL",
"PerPkg": "1",
@@ -2980,6 +3316,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
"PerPkg": "1",
@@ -2989,6 +3326,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
"PerPkg": "1",
@@ -2998,6 +3336,7 @@
},
{
"BriefDescription": "BL Egress Occupancy: All",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_TxR_BL_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -3006,6 +3345,7 @@
},
{
"BriefDescription": "BL Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED0",
"PerPkg": "1",
@@ -3015,6 +3355,7 @@
},
{
"BriefDescription": "BL Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED1",
"PerPkg": "1",
@@ -3024,6 +3365,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -3033,6 +3375,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -3042,6 +3385,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -3051,6 +3395,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -3060,6 +3405,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -3069,6 +3415,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -3078,6 +3425,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -3087,6 +3435,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
index 914d2cfb3d3d..b805dfc6a625 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Address Match (Conflict) Count; Conflict Merges",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_ADDRESS_MATCH.MERGE_COUNT",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Address Match (Conflict) Count; Conflict Stalls",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_ADDRESS_MATCH.STALL_COUNT",
"PerPkg": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Write Ack Pending Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Write Ack Pending Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Outstanding Write Ownership Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_CACHE_OWN_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Outstanding Write Ownership Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_CACHE_OWN_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Outstanding Read Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_CACHE_READ_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "Outstanding Read Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_CACHE_READ_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -73,6 +81,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -82,6 +91,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -91,6 +101,7 @@
},
{
"BriefDescription": "Outstanding Write Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "Outstanding Write Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -109,12 +121,14 @@
},
{
"BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Number of clocks in the IRP.",
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0xb",
"EventName": "UNC_I_RxR_AK_CYCLES_FULL",
"PerPkg": "1",
@@ -123,6 +137,7 @@
},
{
"BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
"EventCode": "0xa",
"EventName": "UNC_I_RxR_AK_INSERTS",
"PerPkg": "1",
@@ -130,6 +145,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0xc",
"EventName": "UNC_I_RxR_AK_OCCUPANCY",
"PerPkg": "1",
@@ -137,6 +153,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
@@ -145,6 +162,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
@@ -152,6 +170,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
@@ -159,6 +178,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
@@ -167,6 +187,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
@@ -174,6 +195,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
@@ -181,6 +203,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
@@ -189,6 +212,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
@@ -196,6 +220,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
@@ -204,6 +229,7 @@
},
{
"BriefDescription": "Tickle Count; Ownership Lost",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TICKLES.LOST_OWNERSHIP",
"PerPkg": "1",
@@ -213,6 +239,7 @@
},
{
"BriefDescription": "Tickle Count; Data Returned",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TICKLES.TOP_OF_QUEUE",
"PerPkg": "1",
@@ -222,6 +249,7 @@
},
{
"BriefDescription": "Inbound Transaction Count: Read Prefetches",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_TRANSACTIONS.PD_PREFETCHES",
"PerPkg": "1",
@@ -231,6 +259,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_TRANSACTIONS.RD_PREFETCHES",
"PerPkg": "1",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_TRANSACTIONS.READS",
"PerPkg": "1",
@@ -249,6 +279,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
"PerPkg": "1",
@@ -258,6 +289,7 @@
},
{
"BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -266,6 +298,7 @@
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -274,6 +307,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xe",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
"PerPkg": "1",
@@ -282,6 +316,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xf",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
"PerPkg": "1",
@@ -290,6 +325,7 @@
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0xd",
"EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
"PerPkg": "1",
@@ -298,6 +334,7 @@
},
{
"BriefDescription": "Write Ordering Stalls",
+ "Counter": "0,1",
"EventCode": "0x1a",
"EventName": "UNC_I_WRITE_ORDERING_STALL_CYCLES",
"PerPkg": "1",
@@ -306,6 +343,7 @@
},
{
"BriefDescription": "Number of qfclks",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
@@ -314,6 +352,7 @@
},
{
"BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_CTO_COUNT",
"PerPkg": "1",
@@ -322,6 +361,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
"PerPkg": "1",
@@ -331,6 +371,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
"PerPkg": "1",
@@ -340,6 +381,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
"PerPkg": "1",
@@ -349,6 +391,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
"PerPkg": "1",
@@ -358,6 +401,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
"PerPkg": "1",
@@ -367,6 +411,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
"PerPkg": "1",
@@ -376,6 +421,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
"PerPkg": "1",
@@ -385,6 +431,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
"PerPkg": "1",
@@ -394,6 +441,7 @@
},
{
"BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -401,198 +449,231 @@
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MATCH_MASK",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.AnyDataC",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.AnyResp",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.AnyResp11flits",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.AnyResp9flits",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.DataC_E",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.DataC_E_Cmp",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.DataC_E_FrcAckCnflt",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.DataC_F",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.DataC_F_Cmp",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.DataC_F_FrcAckCnflt",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.DataC_M",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.WbEData",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.WbIData",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.DRS.WbSData",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.AnyReq",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.AnyResp",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.RespFwd",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.RespFwdI",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.RespFwdIWb",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.RespFwdS",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.RespFwdSWb",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.RespIWb",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.HOM.RespSWb",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.NCB.AnyInt",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.NCB.AnyMsg",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.NCB.AnyMsg11flits",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.NCB.AnyMsg9flits",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.NCS.AnyMsg1or2flits",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.NCS.AnyMsg3flits",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.NCS.NcRd",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.NDR.AnyCmp",
"PerPkg": "1",
"Unit": "QPI"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_MESSAGE.SNP.AnySnp",
"PerPkg": "1",
@@ -600,6 +681,7 @@
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -608,6 +690,7 @@
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0xf",
"EventName": "UNC_Q_RxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -616,6 +699,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_BYPASSED",
"PerPkg": "1",
@@ -624,6 +708,7 @@
},
{
"BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
"PerPkg": "1",
@@ -633,6 +718,7 @@
},
{
"BriefDescription": "CRC Errors Detected; Normal Operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
"PerPkg": "1",
@@ -642,6 +728,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
"PerPkg": "1",
@@ -651,6 +738,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
"PerPkg": "1",
@@ -660,6 +748,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
"PerPkg": "1",
@@ -669,6 +758,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
"PerPkg": "1",
@@ -678,6 +768,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
"PerPkg": "1",
@@ -687,6 +778,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
"PerPkg": "1",
@@ -696,6 +788,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
"PerPkg": "1",
@@ -705,6 +798,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
"PerPkg": "1",
@@ -714,6 +808,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
"PerPkg": "1",
@@ -723,6 +818,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
"PerPkg": "1",
@@ -732,6 +828,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
"PerPkg": "1",
@@ -741,6 +838,7 @@
},
{
"BriefDescription": "VN1 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
"PerPkg": "1",
@@ -750,6 +848,7 @@
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
"PerPkg": "1",
@@ -758,6 +857,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_Q_RxL_CYCLES_NE",
"PerPkg": "1",
@@ -766,6 +866,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
"PerPkg": "1",
@@ -775,6 +876,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
"PerPkg": "1",
@@ -784,6 +886,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
"PerPkg": "1",
@@ -793,6 +896,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
"PerPkg": "1",
@@ -802,6 +906,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
"PerPkg": "1",
@@ -811,6 +916,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
"PerPkg": "1",
@@ -820,6 +926,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
"PerPkg": "1",
@@ -829,6 +936,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
"PerPkg": "1",
@@ -838,6 +946,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
"PerPkg": "1",
@@ -847,6 +956,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
"PerPkg": "1",
@@ -856,6 +966,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
"PerPkg": "1",
@@ -865,6 +976,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
"PerPkg": "1",
@@ -874,6 +986,7 @@
},
{
"BriefDescription": "Flits Received - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.DATA",
"PerPkg": "1",
@@ -883,6 +996,7 @@
},
{
"BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
"PerPkg": "1",
@@ -892,6 +1006,7 @@
},
{
"BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
@@ -901,6 +1016,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS",
"PerPkg": "1",
@@ -910,6 +1026,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
@@ -919,6 +1036,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
@@ -928,6 +1046,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM",
"PerPkg": "1",
@@ -937,6 +1056,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
@@ -946,6 +1066,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
@@ -955,6 +1076,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.SNP",
"PerPkg": "1",
@@ -964,6 +1086,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB",
"PerPkg": "1",
@@ -973,6 +1096,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
@@ -982,6 +1106,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
@@ -991,6 +1116,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCS",
"PerPkg": "1",
@@ -1000,6 +1126,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
@@ -1009,6 +1136,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
@@ -1018,6 +1146,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_Q_RxL_INSERTS",
"PerPkg": "1",
@@ -1026,6 +1155,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS",
"PerPkg": "1",
@@ -1034,6 +1164,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
"PerPkg": "1",
@@ -1043,6 +1174,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
"PerPkg": "1",
@@ -1052,6 +1184,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_Q_RxL_INSERTS_HOM",
"PerPkg": "1",
@@ -1060,6 +1193,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
"PerPkg": "1",
@@ -1069,6 +1203,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
"PerPkg": "1",
@@ -1078,6 +1213,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_Q_RxL_INSERTS_NCB",
"PerPkg": "1",
@@ -1086,6 +1222,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
"PerPkg": "1",
@@ -1095,6 +1232,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
"PerPkg": "1",
@@ -1104,6 +1242,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_Q_RxL_INSERTS_NCS",
"PerPkg": "1",
@@ -1112,6 +1251,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
"PerPkg": "1",
@@ -1121,6 +1261,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
"PerPkg": "1",
@@ -1130,6 +1271,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_Q_RxL_INSERTS_NDR",
"PerPkg": "1",
@@ -1138,6 +1280,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
"PerPkg": "1",
@@ -1147,6 +1290,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
"PerPkg": "1",
@@ -1156,6 +1300,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_Q_RxL_INSERTS_SNP",
"PerPkg": "1",
@@ -1164,6 +1309,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
"PerPkg": "1",
@@ -1173,6 +1319,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
"PerPkg": "1",
@@ -1182,6 +1329,7 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_Q_RxL_OCCUPANCY",
"PerPkg": "1",
@@ -1190,6 +1338,7 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
"PerPkg": "1",
@@ -1198,6 +1347,7 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
"PerPkg": "1",
@@ -1207,6 +1357,7 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
"PerPkg": "1",
@@ -1216,6 +1367,7 @@
},
{
"BriefDescription": "RxQ Occupancy - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
"PerPkg": "1",
@@ -1224,6 +1376,7 @@
},
{
"BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
"PerPkg": "1",
@@ -1233,6 +1386,7 @@
},
{
"BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
"PerPkg": "1",
@@ -1242,6 +1396,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
"PerPkg": "1",
@@ -1250,6 +1405,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
"PerPkg": "1",
@@ -1259,6 +1415,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
"PerPkg": "1",
@@ -1268,6 +1425,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
"PerPkg": "1",
@@ -1276,6 +1434,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
"PerPkg": "1",
@@ -1285,6 +1444,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
"PerPkg": "1",
@@ -1294,6 +1454,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
"PerPkg": "1",
@@ -1302,6 +1463,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
"PerPkg": "1",
@@ -1311,6 +1473,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
"PerPkg": "1",
@@ -1320,6 +1483,7 @@
},
{
"BriefDescription": "RxQ Occupancy - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
"PerPkg": "1",
@@ -1328,6 +1492,7 @@
},
{
"BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
"PerPkg": "1",
@@ -1337,6 +1502,7 @@
},
{
"BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
"PerPkg": "1",
@@ -1346,6 +1512,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
"PerPkg": "1",
@@ -1355,6 +1522,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
"PerPkg": "1",
@@ -1364,6 +1532,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
"PerPkg": "1",
@@ -1373,6 +1542,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
"PerPkg": "1",
@@ -1382,6 +1552,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
"PerPkg": "1",
@@ -1391,6 +1562,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
"PerPkg": "1",
@@ -1400,6 +1572,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
"PerPkg": "1",
@@ -1409,6 +1582,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.GV",
"PerPkg": "1",
@@ -1418,6 +1592,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x3a",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
"PerPkg": "1",
@@ -1427,6 +1602,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x3a",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
"PerPkg": "1",
@@ -1436,6 +1612,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x3a",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
"PerPkg": "1",
@@ -1445,6 +1622,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x3a",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
"PerPkg": "1",
@@ -1454,6 +1632,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x3a",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
"PerPkg": "1",
@@ -1463,6 +1642,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x3a",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
"PerPkg": "1",
@@ -1472,6 +1652,7 @@
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -1480,6 +1661,7 @@
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_Q_TxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -1488,6 +1670,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_Q_TxL_BYPASSED",
"PerPkg": "1",
@@ -1496,6 +1679,7 @@
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
"PerPkg": "1",
@@ -1505,6 +1689,7 @@
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
"PerPkg": "1",
@@ -1514,6 +1699,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_Q_TxL_CYCLES_NE",
"PerPkg": "1",
@@ -1522,6 +1708,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
@@ -1530,6 +1717,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
@@ -1538,6 +1726,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency.",
@@ -1546,6 +1735,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
@@ -1554,6 +1744,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
@@ -1562,6 +1753,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits transmitted over QPI on the home channel.",
@@ -1570,6 +1762,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits transmitted over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
@@ -1578,6 +1771,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request transmitted over QPI on the home channel. This basically counts the number of remote memory requests transmitted over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
@@ -1586,6 +1780,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits transmitted over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are transmitted on the home channel.",
@@ -1594,6 +1789,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB",
"PerPkg": "1",
@@ -1603,6 +1799,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
@@ -1612,6 +1809,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
@@ -1621,6 +1819,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCS",
"PerPkg": "1",
@@ -1630,6 +1829,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
@@ -1639,6 +1839,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
@@ -1648,6 +1849,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_Q_TxL_INSERTS",
"PerPkg": "1",
@@ -1656,6 +1858,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_Q_TxL_OCCUPANCY",
"PerPkg": "1",
@@ -1664,6 +1867,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1673,6 +1877,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1682,6 +1887,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1691,6 +1897,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1700,6 +1907,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1709,6 +1917,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1718,6 +1927,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1727,6 +1937,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1736,6 +1947,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1745,6 +1957,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1754,6 +1967,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1763,6 +1977,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1772,6 +1987,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
"PerPkg": "1",
@@ -1780,6 +1996,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1789,6 +2006,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1798,6 +2016,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
"PerPkg": "1",
@@ -1806,6 +2025,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1815,6 +2035,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1824,6 +2045,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1833,6 +2055,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1842,6 +2065,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
"PerPkg": "1",
@@ -1851,6 +2075,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1860,6 +2085,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1869,6 +2095,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "Counter": "0,1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
"PerPkg": "1",
@@ -1878,6 +2105,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1887,6 +2115,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1896,6 +2125,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1905,6 +2135,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1914,6 +2145,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
@@ -1923,6 +2155,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
@@ -1932,6 +2165,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
@@ -1941,6 +2175,7 @@
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
@@ -1950,6 +2185,7 @@
},
{
"BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_Q_VNA_CREDIT_RETURNS",
"PerPkg": "1",
@@ -1958,6 +2194,7 @@
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
"PerPkg": "1",
@@ -1966,6 +2203,7 @@
},
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
"EventCode": "0x1",
"EventName": "UNC_R3_CLOCKTICKS",
"PerPkg": "1",
@@ -1974,6 +2212,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2c",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
"PerPkg": "1",
@@ -1983,6 +2222,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2c",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
"PerPkg": "1",
@@ -1992,6 +2232,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2c",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
"PerPkg": "1",
@@ -2001,6 +2242,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2c",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
"PerPkg": "1",
@@ -2010,6 +2252,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2c",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14",
"PerPkg": "1",
@@ -2019,6 +2262,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2c",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
"PerPkg": "1",
@@ -2028,6 +2272,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2c",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
"PerPkg": "1",
@@ -2037,6 +2282,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2b",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
"PerPkg": "1",
@@ -2046,6 +2292,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2b",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
"PerPkg": "1",
@@ -2055,6 +2302,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2b",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
"PerPkg": "1",
@@ -2064,6 +2312,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2b",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
"PerPkg": "1",
@@ -2073,6 +2322,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2b",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
"PerPkg": "1",
@@ -2082,6 +2332,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2b",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
"PerPkg": "1",
@@ -2091,6 +2342,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2b",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
"PerPkg": "1",
@@ -2100,6 +2352,7 @@
},
{
"BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2b",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
"PerPkg": "1",
@@ -2109,6 +2362,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2f",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
"PerPkg": "1",
@@ -2118,6 +2372,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2f",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
"PerPkg": "1",
@@ -2127,6 +2382,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2f",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
"PerPkg": "1",
@@ -2136,6 +2392,7 @@
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2f",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
"PerPkg": "1",
@@ -2145,6 +2402,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
@@ -2154,6 +2412,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
@@ -2163,6 +2422,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
@@ -2172,6 +2432,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2181,6 +2442,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2190,6 +2452,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2199,6 +2462,7 @@
},
{
"BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2208,6 +2472,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
@@ -2217,6 +2482,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
@@ -2226,6 +2492,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
@@ -2235,6 +2502,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2244,6 +2512,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2253,6 +2522,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2262,6 +2532,7 @@
},
{
"BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2271,6 +2542,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2a",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
@@ -2280,6 +2552,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2a",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
@@ -2289,6 +2562,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2a",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
@@ -2298,6 +2572,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2a",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2307,6 +2582,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2a",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2316,6 +2592,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2a",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2325,6 +2602,7 @@
},
{
"BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2a",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2334,6 +2612,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2e",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
@@ -2343,6 +2622,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2e",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
@@ -2352,6 +2632,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2e",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
@@ -2361,6 +2642,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2e",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
@@ -2370,6 +2652,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2e",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
@@ -2379,6 +2662,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2e",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
@@ -2388,6 +2672,7 @@
},
{
"BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
"EventCode": "0x2e",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
"PerPkg": "1",
@@ -2397,6 +2682,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -2406,6 +2692,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -2415,6 +2702,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -2424,6 +2712,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW",
"PerPkg": "1",
@@ -2433,6 +2722,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -2442,6 +2732,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -2451,6 +2742,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -2460,6 +2752,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -2469,6 +2762,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -2478,6 +2772,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW",
"PerPkg": "1",
@@ -2487,6 +2782,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -2496,6 +2792,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -2505,6 +2802,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -2514,6 +2812,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -2523,6 +2822,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -2532,6 +2832,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW",
"PerPkg": "1",
@@ -2541,6 +2842,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -2550,6 +2852,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -2559,6 +2862,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2",
"EventCode": "0xA",
"EventName": "UNC_R3_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -2568,6 +2872,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
"EventCode": "0xa",
"EventName": "UNC_R3_RING_IV_USED.CCW",
"PerPkg": "1",
@@ -2577,6 +2882,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2",
"EventCode": "0xa",
"EventName": "UNC_R3_RING_IV_USED.CW",
"PerPkg": "1",
@@ -2586,6 +2892,7 @@
},
{
"BriefDescription": "AD Ingress Bypassed",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_R3_RxR_AD_BYPASSED",
"PerPkg": "1",
@@ -2594,6 +2901,7 @@
},
{
"BriefDescription": "Ingress Bypassed",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_R3_RxR_BYPASSED.AD",
"PerPkg": "1",
@@ -2603,6 +2911,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
"PerPkg": "1",
@@ -2612,6 +2921,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
"PerPkg": "1",
@@ -2621,6 +2931,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
"PerPkg": "1",
@@ -2630,6 +2941,7 @@
},
{
"BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.DRS",
"PerPkg": "1",
@@ -2639,6 +2951,7 @@
},
{
"BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.HOM",
"PerPkg": "1",
@@ -2648,6 +2961,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCB",
"PerPkg": "1",
@@ -2657,6 +2971,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCS",
"PerPkg": "1",
@@ -2666,6 +2981,7 @@
},
{
"BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NDR",
"PerPkg": "1",
@@ -2675,6 +2991,7 @@
},
{
"BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.SNP",
"PerPkg": "1",
@@ -2684,6 +3001,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.DRS",
"PerPkg": "1",
@@ -2693,6 +3011,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; HOM",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.HOM",
"PerPkg": "1",
@@ -2702,6 +3021,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; NCB",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.NCB",
"PerPkg": "1",
@@ -2711,6 +3031,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; NCS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.NCS",
"PerPkg": "1",
@@ -2720,6 +3041,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; NDR",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.NDR",
"PerPkg": "1",
@@ -2729,6 +3051,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; SNP",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.SNP",
"PerPkg": "1",
@@ -2738,6 +3061,7 @@
},
{
"BriefDescription": "Egress NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_TxR_NACK_CCW.AD",
"PerPkg": "1",
@@ -2747,6 +3071,7 @@
},
{
"BriefDescription": "Egress NACK; BL CW",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_TxR_NACK_CCW.AK",
"PerPkg": "1",
@@ -2756,6 +3081,7 @@
},
{
"BriefDescription": "Egress NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_TxR_NACK_CCW.BL",
"PerPkg": "1",
@@ -2765,6 +3091,7 @@
},
{
"BriefDescription": "Egress NACK; AD CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK_CW.AD",
"PerPkg": "1",
@@ -2774,6 +3101,7 @@
},
{
"BriefDescription": "Egress NACK; AD CCW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK_CW.AK",
"PerPkg": "1",
@@ -2783,6 +3111,7 @@
},
{
"BriefDescription": "Egress NACK; AK CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK_CW.BL",
"PerPkg": "1",
@@ -2792,6 +3121,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -2801,6 +3131,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -2810,6 +3141,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -2819,6 +3151,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -2828,6 +3161,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -2837,6 +3171,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -2846,6 +3181,7 @@
},
{
"BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -2855,6 +3191,7 @@
},
{
"BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
"PerPkg": "1",
@@ -2864,6 +3201,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -2873,6 +3211,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -2882,6 +3221,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
"PerPkg": "1",
@@ -2891,6 +3231,7 @@
},
{
"BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
"PerPkg": "1",
@@ -2900,6 +3241,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -2909,6 +3251,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -2918,6 +3261,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -2927,6 +3271,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -2936,6 +3281,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -2945,6 +3291,7 @@
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -2954,6 +3301,7 @@
},
{
"BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -2963,6 +3311,7 @@
},
{
"BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
"PerPkg": "1",
@@ -2972,6 +3321,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -2981,6 +3331,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -2990,6 +3341,7 @@
},
{
"BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
"PerPkg": "1",
@@ -2999,6 +3351,7 @@
},
{
"BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
"PerPkg": "1",
@@ -3008,6 +3361,7 @@
},
{
"BriefDescription": "VNA credit Acquisitions",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED",
"PerPkg": "1",
@@ -3016,6 +3370,7 @@
},
{
"BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
@@ -3025,6 +3380,7 @@
},
{
"BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
@@ -3034,6 +3390,7 @@
},
{
"BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -3043,6 +3400,7 @@
},
{
"BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -3052,6 +3410,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -3061,6 +3420,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -3070,6 +3430,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -3079,6 +3440,7 @@
},
{
"BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -3088,6 +3450,7 @@
},
{
"BriefDescription": "Cycles with no VNA credits available",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_R3_VNA_CREDIT_CYCLES_OUT",
"PerPkg": "1",
@@ -3096,6 +3459,7 @@
},
{
"BriefDescription": "Cycles with 1 or more VNA credits in use",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R3_VNA_CREDIT_CYCLES_USED",
"PerPkg": "1",
@@ -3103,12 +3467,14 @@
"Unit": "R3QPI"
},
{
+ "Counter": "0,1",
"EventName": "UNC_U_CLOCKTICKS",
"PerPkg": "1",
"Unit": "UBOX"
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
"PerPkg": "1",
@@ -3118,6 +3484,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.INT_PRIO",
"PerPkg": "1",
@@ -3127,6 +3494,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
"PerPkg": "1",
@@ -3136,6 +3504,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
"PerPkg": "1",
@@ -3145,6 +3514,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
"PerPkg": "1",
@@ -3154,6 +3524,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.DISABLE",
"PerPkg": "1",
@@ -3163,6 +3534,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.ENABLE",
"PerPkg": "1",
@@ -3172,6 +3544,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
"PerPkg": "1",
@@ -3181,6 +3554,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
"PerPkg": "1",
@@ -3190,6 +3564,7 @@
},
{
"BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
"EventCode": "0x44",
"EventName": "UNC_U_LOCK_CYCLES",
"PerPkg": "1",
@@ -3198,6 +3573,7 @@
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
"PerPkg": "1",
@@ -3207,6 +3583,7 @@
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
"PerPkg": "1",
@@ -3214,6 +3591,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.CMC",
"PerPkg": "1",
@@ -3223,6 +3601,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
"PerPkg": "1",
@@ -3232,6 +3611,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LTERROR",
"PerPkg": "1",
@@ -3241,6 +3621,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
"PerPkg": "1",
@@ -3250,6 +3631,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
"PerPkg": "1",
@@ -3259,6 +3641,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.OTHER",
"PerPkg": "1",
@@ -3268,6 +3651,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.TRAP",
"PerPkg": "1",
@@ -3277,6 +3661,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.UMC",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json
index 5887e6ebcfa8..0bc6641fb6a5 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_R2_CLOCKTICKS",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
"PerPkg": "1",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
"PerPkg": "1",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
"PerPkg": "1",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; DRS",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R2_IIO_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -54,6 +60,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -63,6 +70,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -72,6 +80,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW",
"PerPkg": "1",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -90,6 +100,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -99,6 +110,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_VR1_EVEN",
"PerPkg": "1",
@@ -108,6 +120,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_VR1_ODD",
"PerPkg": "1",
@@ -117,6 +130,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW",
"PerPkg": "1",
@@ -126,6 +140,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -135,6 +150,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -144,6 +160,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_VR1_EVEN",
"PerPkg": "1",
@@ -153,6 +170,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_VR1_ODD",
"PerPkg": "1",
@@ -162,6 +180,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW",
"PerPkg": "1",
@@ -171,6 +190,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -180,6 +200,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -189,6 +210,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_VR1_EVEN",
"PerPkg": "1",
@@ -198,6 +220,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_VR1_ODD",
"PerPkg": "1",
@@ -207,6 +230,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW",
"PerPkg": "1",
@@ -216,6 +240,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -225,6 +250,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -234,6 +260,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_VR1_EVEN",
"PerPkg": "1",
@@ -243,6 +270,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_VR1_ODD",
"PerPkg": "1",
@@ -252,6 +280,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW",
"PerPkg": "1",
@@ -261,6 +290,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_VR0_EVEN",
"PerPkg": "1",
@@ -270,6 +300,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_VR0_ODD",
"PerPkg": "1",
@@ -279,6 +310,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_VR1_EVEN",
"PerPkg": "1",
@@ -288,6 +320,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_VR1_ODD",
"PerPkg": "1",
@@ -297,6 +330,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW",
"PerPkg": "1",
@@ -306,6 +340,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_VR0_EVEN",
"PerPkg": "1",
@@ -315,6 +350,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_VR0_ODD",
"PerPkg": "1",
@@ -324,6 +360,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_VR1_EVEN",
"PerPkg": "1",
@@ -333,6 +370,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_VR1_ODD",
"PerPkg": "1",
@@ -342,6 +380,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_R2_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -351,6 +390,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_R2_RING_IV_USED.CCW",
"PerPkg": "1",
@@ -360,6 +400,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_R2_RING_IV_USED.CW",
"PerPkg": "1",
@@ -369,6 +410,7 @@
},
{
"BriefDescription": "AK Ingress Bounced",
+ "Counter": "0",
"EventCode": "0x12",
"EventName": "UNC_R2_RxR_AK_BOUNCES",
"PerPkg": "1",
@@ -377,6 +419,7 @@
},
{
"BriefDescription": "AK Ingress Bounced; Counterclockwise",
+ "Counter": "0",
"EventCode": "0x12",
"EventName": "UNC_R2_RxR_AK_BOUNCES.CCW",
"PerPkg": "1",
@@ -386,6 +429,7 @@
},
{
"BriefDescription": "AK Ingress Bounced; Clockwise",
+ "Counter": "0",
"EventCode": "0x12",
"EventName": "UNC_R2_RxR_AK_BOUNCES.CW",
"PerPkg": "1",
@@ -395,6 +439,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
"PerPkg": "1",
@@ -404,6 +449,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
"PerPkg": "1",
@@ -413,6 +459,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R2_RxR_INSERTS.NCB",
"PerPkg": "1",
@@ -422,6 +469,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R2_RxR_INSERTS.NCS",
"PerPkg": "1",
@@ -431,6 +479,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
"PerPkg": "1",
@@ -440,6 +489,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AD",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
"PerPkg": "1",
@@ -449,6 +499,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AK",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
"PerPkg": "1",
@@ -458,6 +509,7 @@
},
{
"BriefDescription": "Egress Cycles Full; BL",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
"PerPkg": "1",
@@ -467,6 +519,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AD",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AD",
"PerPkg": "1",
@@ -476,6 +529,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AK",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AK",
"PerPkg": "1",
@@ -485,6 +539,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; BL",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.BL",
"PerPkg": "1",
@@ -494,6 +549,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_TxR_NACK_CCW.AD",
"PerPkg": "1",
@@ -503,6 +559,7 @@
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_TxR_NACK_CCW.AK",
"PerPkg": "1",
@@ -512,6 +569,7 @@
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R2_TxR_NACK_CCW.BL",
"PerPkg": "1",
@@ -521,6 +579,7 @@
},
{
"BriefDescription": "Egress CW NACK; AD CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.AD",
"PerPkg": "1",
@@ -530,6 +589,7 @@
},
{
"BriefDescription": "Egress CW NACK; AK CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.AK",
"PerPkg": "1",
@@ -539,6 +599,7 @@
},
{
"BriefDescription": "Egress CW NACK; BL CW",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACK_CW.BL",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
index 65509342d56a..1406d220df2d 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.BYP",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
"PerPkg": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.WR",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M_BYP_CMDS.ACT",
"PerPkg": "1",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M_BYP_CMDS.CAS",
"PerPkg": "1",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M_BYP_CMDS.PRE",
"PerPkg": "1",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -61,6 +68,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -70,6 +78,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
@@ -79,6 +88,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_RMM",
"PerPkg": "1",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
@@ -96,6 +107,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_WMM",
"PerPkg": "1",
@@ -104,6 +116,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -113,6 +126,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
"PerPkg": "1",
@@ -122,6 +136,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
@@ -131,12 +146,14 @@
},
{
"BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_DCLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
"PerPkg": "1",
@@ -145,6 +162,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
@@ -154,6 +172,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
@@ -163,6 +182,7 @@
},
{
"BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
@@ -171,6 +191,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
"PerPkg": "1",
@@ -180,6 +201,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
"PerPkg": "1",
@@ -189,6 +211,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
@@ -198,6 +221,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
"PerPkg": "1",
@@ -207,6 +231,7 @@
},
{
"BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
"PerPkg": "1",
@@ -215,6 +240,7 @@
},
{
"BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"PerPkg": "1",
@@ -223,6 +249,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
"PerPkg": "1",
@@ -232,6 +259,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
"PerPkg": "1",
@@ -241,6 +269,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
"PerPkg": "1",
@@ -250,6 +279,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
"PerPkg": "1",
@@ -259,6 +289,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
"PerPkg": "1",
@@ -268,6 +299,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
"PerPkg": "1",
@@ -277,6 +309,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
"PerPkg": "1",
@@ -286,6 +319,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
"PerPkg": "1",
@@ -295,6 +329,7 @@
},
{
"BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
"PerPkg": "1",
@@ -302,6 +337,7 @@
"Unit": "iMC"
},
{
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M_POWER_PCU_THROTTLING",
"PerPkg": "1",
@@ -309,6 +345,7 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"PerPkg": "1",
@@ -317,6 +354,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
"PerPkg": "1",
@@ -326,6 +364,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
"PerPkg": "1",
@@ -335,6 +374,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
"PerPkg": "1",
@@ -344,6 +384,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
"PerPkg": "1",
@@ -353,6 +394,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
"PerPkg": "1",
@@ -362,6 +404,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
"PerPkg": "1",
@@ -371,6 +414,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
"PerPkg": "1",
@@ -380,6 +424,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
"PerPkg": "1",
@@ -389,6 +434,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
"PerPkg": "1",
@@ -398,6 +444,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
"PerPkg": "1",
@@ -407,6 +454,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.BYP",
"PerPkg": "1",
@@ -416,6 +464,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
"PerPkg": "1",
@@ -425,6 +474,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
@@ -434,6 +484,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -443,6 +494,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
@@ -452,6 +504,7 @@
},
{
"BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_RD_CAS_PRIO.HIGH",
"PerPkg": "1",
@@ -460,6 +513,7 @@
},
{
"BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_RD_CAS_PRIO.LOW",
"PerPkg": "1",
@@ -468,6 +522,7 @@
},
{
"BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_RD_CAS_PRIO.MED",
"PerPkg": "1",
@@ -476,6 +531,7 @@
},
{
"BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_RD_CAS_PRIO.PANIC",
"PerPkg": "1",
@@ -484,6 +540,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK0",
"PerPkg": "1",
@@ -492,6 +549,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK1",
"PerPkg": "1",
@@ -500,6 +558,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK2",
"PerPkg": "1",
@@ -508,6 +567,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK3",
"PerPkg": "1",
@@ -516,6 +576,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK4",
"PerPkg": "1",
@@ -524,6 +585,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK5",
"PerPkg": "1",
@@ -532,6 +594,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK6",
"PerPkg": "1",
@@ -540,6 +603,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK7",
"PerPkg": "1",
@@ -548,6 +612,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK0",
"PerPkg": "1",
@@ -556,6 +621,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK1",
"PerPkg": "1",
@@ -564,6 +630,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK2",
"PerPkg": "1",
@@ -572,6 +639,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK3",
"PerPkg": "1",
@@ -580,6 +648,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK4",
"PerPkg": "1",
@@ -588,6 +657,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK5",
"PerPkg": "1",
@@ -596,6 +666,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK6",
"PerPkg": "1",
@@ -604,6 +675,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK7",
"PerPkg": "1",
@@ -612,6 +684,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK0",
"PerPkg": "1",
@@ -620,6 +693,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK1",
"PerPkg": "1",
@@ -628,6 +702,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK2",
"PerPkg": "1",
@@ -636,6 +711,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK3",
"PerPkg": "1",
@@ -644,6 +720,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK4",
"PerPkg": "1",
@@ -652,6 +729,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK5",
"PerPkg": "1",
@@ -660,6 +738,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK6",
"PerPkg": "1",
@@ -668,6 +747,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK7",
"PerPkg": "1",
@@ -676,6 +756,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK0",
"PerPkg": "1",
@@ -684,6 +765,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK1",
"PerPkg": "1",
@@ -692,6 +774,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK2",
"PerPkg": "1",
@@ -700,6 +783,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK3",
"PerPkg": "1",
@@ -708,6 +792,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK4",
"PerPkg": "1",
@@ -716,6 +801,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK5",
"PerPkg": "1",
@@ -724,6 +810,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK6",
"PerPkg": "1",
@@ -732,6 +819,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK7",
"PerPkg": "1",
@@ -740,6 +828,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK0",
"PerPkg": "1",
@@ -748,6 +837,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK1",
"PerPkg": "1",
@@ -756,6 +846,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK2",
"PerPkg": "1",
@@ -764,6 +855,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK3",
"PerPkg": "1",
@@ -772,6 +864,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK4",
"PerPkg": "1",
@@ -780,6 +873,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK5",
"PerPkg": "1",
@@ -788,6 +882,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK6",
"PerPkg": "1",
@@ -796,6 +891,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK7",
"PerPkg": "1",
@@ -804,6 +900,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK0",
"PerPkg": "1",
@@ -812,6 +909,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK1",
"PerPkg": "1",
@@ -820,6 +918,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK2",
"PerPkg": "1",
@@ -828,6 +927,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK3",
"PerPkg": "1",
@@ -836,6 +936,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK4",
"PerPkg": "1",
@@ -844,6 +945,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK5",
"PerPkg": "1",
@@ -852,6 +954,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK6",
"PerPkg": "1",
@@ -860,6 +963,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK7",
"PerPkg": "1",
@@ -868,6 +972,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK0",
"PerPkg": "1",
@@ -876,6 +981,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK1",
"PerPkg": "1",
@@ -884,6 +990,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK2",
"PerPkg": "1",
@@ -892,6 +999,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK3",
"PerPkg": "1",
@@ -900,6 +1008,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK4",
"PerPkg": "1",
@@ -908,6 +1017,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK5",
"PerPkg": "1",
@@ -916,6 +1026,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK6",
"PerPkg": "1",
@@ -924,6 +1035,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK7",
"PerPkg": "1",
@@ -932,6 +1044,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK0",
"PerPkg": "1",
@@ -940,6 +1053,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK1",
"PerPkg": "1",
@@ -948,6 +1062,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK2",
"PerPkg": "1",
@@ -956,6 +1071,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK3",
"PerPkg": "1",
@@ -964,6 +1080,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK4",
"PerPkg": "1",
@@ -972,6 +1089,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK5",
"PerPkg": "1",
@@ -980,6 +1098,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK6",
"PerPkg": "1",
@@ -988,6 +1107,7 @@
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK7",
"PerPkg": "1",
@@ -996,6 +1116,7 @@
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
"PerPkg": "1",
@@ -1004,6 +1125,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -1012,6 +1134,7 @@
},
{
"BriefDescription": "VMSE MXB write buffer occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
"PerPkg": "1",
@@ -1019,6 +1142,7 @@
},
{
"BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M_VMSE_WR_PUSH.RMM",
"PerPkg": "1",
@@ -1027,6 +1151,7 @@
},
{
"BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M_VMSE_WR_PUSH.WMM",
"PerPkg": "1",
@@ -1035,6 +1160,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
"PerPkg": "1",
@@ -1043,6 +1169,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M_WMM_TO_RMM.STARVE",
"PerPkg": "1",
@@ -1051,6 +1178,7 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
"PerPkg": "1",
@@ -1059,6 +1187,7 @@
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
"PerPkg": "1",
@@ -1067,6 +1196,7 @@
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
"PerPkg": "1",
@@ -1075,6 +1205,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
@@ -1083,6 +1214,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
"PerPkg": "1",
@@ -1091,6 +1223,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
"PerPkg": "1",
@@ -1099,6 +1232,7 @@
},
{
"BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_M_WRONG_MM",
"PerPkg": "1",
@@ -1106,6 +1240,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK0",
"PerPkg": "1",
@@ -1114,6 +1249,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK1",
"PerPkg": "1",
@@ -1122,6 +1258,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK2",
"PerPkg": "1",
@@ -1130,6 +1267,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK3",
"PerPkg": "1",
@@ -1138,6 +1276,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK4",
"PerPkg": "1",
@@ -1146,6 +1285,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK5",
"PerPkg": "1",
@@ -1154,6 +1294,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK6",
"PerPkg": "1",
@@ -1162,6 +1303,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK7",
"PerPkg": "1",
@@ -1170,6 +1312,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK0",
"PerPkg": "1",
@@ -1178,6 +1321,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK1",
"PerPkg": "1",
@@ -1186,6 +1330,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK2",
"PerPkg": "1",
@@ -1194,6 +1339,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK3",
"PerPkg": "1",
@@ -1202,6 +1348,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK4",
"PerPkg": "1",
@@ -1210,6 +1357,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK5",
"PerPkg": "1",
@@ -1218,6 +1366,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK6",
"PerPkg": "1",
@@ -1226,6 +1375,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK7",
"PerPkg": "1",
@@ -1234,6 +1384,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK0",
"PerPkg": "1",
@@ -1242,6 +1393,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK1",
"PerPkg": "1",
@@ -1250,6 +1402,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK2",
"PerPkg": "1",
@@ -1258,6 +1411,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK3",
"PerPkg": "1",
@@ -1266,6 +1420,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK4",
"PerPkg": "1",
@@ -1274,6 +1429,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK5",
"PerPkg": "1",
@@ -1282,6 +1438,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK6",
"PerPkg": "1",
@@ -1290,6 +1447,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK7",
"PerPkg": "1",
@@ -1298,6 +1456,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK0",
"PerPkg": "1",
@@ -1306,6 +1465,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK1",
"PerPkg": "1",
@@ -1314,6 +1474,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK2",
"PerPkg": "1",
@@ -1322,6 +1483,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK3",
"PerPkg": "1",
@@ -1330,6 +1492,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK4",
"PerPkg": "1",
@@ -1338,6 +1501,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK5",
"PerPkg": "1",
@@ -1346,6 +1510,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK6",
"PerPkg": "1",
@@ -1354,6 +1519,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK7",
"PerPkg": "1",
@@ -1362,6 +1528,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK0",
"PerPkg": "1",
@@ -1370,6 +1537,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK1",
"PerPkg": "1",
@@ -1378,6 +1546,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK2",
"PerPkg": "1",
@@ -1386,6 +1555,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK3",
"PerPkg": "1",
@@ -1394,6 +1564,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK4",
"PerPkg": "1",
@@ -1402,6 +1573,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK5",
"PerPkg": "1",
@@ -1410,6 +1582,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK6",
"PerPkg": "1",
@@ -1418,6 +1591,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK7",
"PerPkg": "1",
@@ -1426,6 +1600,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK0",
"PerPkg": "1",
@@ -1434,6 +1609,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK1",
"PerPkg": "1",
@@ -1442,6 +1618,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK2",
"PerPkg": "1",
@@ -1450,6 +1627,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK3",
"PerPkg": "1",
@@ -1458,6 +1636,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK4",
"PerPkg": "1",
@@ -1466,6 +1645,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK5",
"PerPkg": "1",
@@ -1474,6 +1654,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK6",
"PerPkg": "1",
@@ -1482,6 +1663,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK7",
"PerPkg": "1",
@@ -1490,6 +1672,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK0",
"PerPkg": "1",
@@ -1498,6 +1681,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK1",
"PerPkg": "1",
@@ -1506,6 +1690,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK2",
"PerPkg": "1",
@@ -1514,6 +1699,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK3",
"PerPkg": "1",
@@ -1522,6 +1708,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK4",
"PerPkg": "1",
@@ -1530,6 +1717,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK5",
"PerPkg": "1",
@@ -1538,6 +1726,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK6",
"PerPkg": "1",
@@ -1546,6 +1735,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK7",
"PerPkg": "1",
@@ -1554,6 +1744,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK0",
"PerPkg": "1",
@@ -1562,6 +1753,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK1",
"PerPkg": "1",
@@ -1570,6 +1762,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK2",
"PerPkg": "1",
@@ -1578,6 +1771,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK3",
"PerPkg": "1",
@@ -1586,6 +1780,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK4",
"PerPkg": "1",
@@ -1594,6 +1789,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK5",
"PerPkg": "1",
@@ -1602,6 +1798,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK6",
"PerPkg": "1",
@@ -1610,6 +1807,7 @@
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK7",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
index ad6c531a9e38..a4bdffe7c1f8 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Core 0 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Core 10 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x7a",
"EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Core 11 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x7b",
"EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Core 12 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x7c",
"EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Core 13 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x7d",
"EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Core 14 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x7e",
"EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Core 1 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Core 2 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Core 3 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Core 4 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -88,6 +99,7 @@
},
{
"BriefDescription": "Core 5 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Core 6 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Core 7 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Core 8 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Core 9 C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE0",
"PerPkg": "1",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE1",
"PerPkg": "1",
@@ -144,6 +162,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE10",
"PerPkg": "1",
@@ -152,6 +171,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 11",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE11",
"PerPkg": "1",
@@ -160,6 +180,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 12",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE12",
"PerPkg": "1",
@@ -168,6 +189,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 13",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE13",
"PerPkg": "1",
@@ -176,6 +198,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 14",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE14",
"PerPkg": "1",
@@ -184,6 +207,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE2",
"PerPkg": "1",
@@ -192,6 +216,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE3",
"PerPkg": "1",
@@ -200,6 +225,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE4",
"PerPkg": "1",
@@ -208,6 +234,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE5",
"PerPkg": "1",
@@ -216,6 +243,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE6",
"PerPkg": "1",
@@ -224,6 +252,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE7",
"PerPkg": "1",
@@ -232,6 +261,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE8",
"PerPkg": "1",
@@ -240,6 +270,7 @@
},
{
"BriefDescription": "Deep C State Rejection - Core 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE9",
"PerPkg": "1",
@@ -248,6 +279,7 @@
},
{
"BriefDescription": "Core 0 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
@@ -256,6 +288,7 @@
},
{
"BriefDescription": "Core 1 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
@@ -264,6 +297,7 @@
},
{
"BriefDescription": "Core 10 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_DEMOTIONS_CORE10",
"PerPkg": "1",
@@ -272,6 +306,7 @@
},
{
"BriefDescription": "Core 11 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_P_DEMOTIONS_CORE11",
"PerPkg": "1",
@@ -280,6 +315,7 @@
},
{
"BriefDescription": "Core 12 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_P_DEMOTIONS_CORE12",
"PerPkg": "1",
@@ -288,6 +324,7 @@
},
{
"BriefDescription": "Core 13 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_P_DEMOTIONS_CORE13",
"PerPkg": "1",
@@ -296,6 +333,7 @@
},
{
"BriefDescription": "Core 14 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_P_DEMOTIONS_CORE14",
"PerPkg": "1",
@@ -304,6 +342,7 @@
},
{
"BriefDescription": "Core 2 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
@@ -312,6 +351,7 @@
},
{
"BriefDescription": "Core 3 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_P_DEMOTIONS_CORE3",
"PerPkg": "1",
@@ -320,6 +360,7 @@
},
{
"BriefDescription": "Core 4 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_P_DEMOTIONS_CORE4",
"PerPkg": "1",
@@ -328,6 +369,7 @@
},
{
"BriefDescription": "Core 5 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_P_DEMOTIONS_CORE5",
"PerPkg": "1",
@@ -336,6 +378,7 @@
},
{
"BriefDescription": "Core 6 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_P_DEMOTIONS_CORE6",
"PerPkg": "1",
@@ -344,6 +387,7 @@
},
{
"BriefDescription": "Core 7 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_P_DEMOTIONS_CORE7",
"PerPkg": "1",
@@ -352,6 +396,7 @@
},
{
"BriefDescription": "Core 8 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_P_DEMOTIONS_CORE8",
"PerPkg": "1",
@@ -360,6 +405,7 @@
},
{
"BriefDescription": "Core 9 C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_P_DEMOTIONS_CORE9",
"PerPkg": "1",
@@ -368,6 +414,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_BAND0_CYCLES",
"PerPkg": "1",
@@ -376,6 +423,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_BAND1_CYCLES",
"PerPkg": "1",
@@ -384,6 +432,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_BAND2_CYCLES",
"PerPkg": "1",
@@ -392,6 +441,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_BAND3_CYCLES",
"PerPkg": "1",
@@ -400,6 +450,7 @@
},
{
"BriefDescription": "Current Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
"PerPkg": "1",
@@ -408,6 +459,7 @@
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
"PerPkg": "1",
@@ -416,6 +468,7 @@
},
{
"BriefDescription": "OS Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
"PerPkg": "1",
@@ -424,6 +477,7 @@
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
"PerPkg": "1",
@@ -432,6 +486,7 @@
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
"PerPkg": "1",
@@ -440,6 +495,7 @@
},
{
"BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
"PerPkg": "1",
@@ -448,6 +504,7 @@
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
"PerPkg": "1",
@@ -456,6 +513,7 @@
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
@@ -464,6 +522,7 @@
},
{
"BriefDescription": "Package C State Exit Latency",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_P_PKG_C_EXIT_LATENCY",
"PerPkg": "1",
@@ -472,6 +531,7 @@
},
{
"BriefDescription": "Package C State Exit Latency",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_P_PKG_C_EXIT_LATENCY_SEL",
"PerPkg": "1",
@@ -480,6 +540,7 @@
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C0_CYCLES",
"PerPkg": "1",
@@ -488,6 +549,7 @@
},
{
"BriefDescription": "Package C State Residency - C2",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C2_CYCLES",
"PerPkg": "1",
@@ -496,6 +558,7 @@
},
{
"BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C3_CYCLES",
"PerPkg": "1",
@@ -504,6 +567,7 @@
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C6_CYCLES",
"PerPkg": "1",
@@ -512,6 +576,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"Filter": "occ_sel=1",
@@ -521,6 +586,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"Filter": "occ_sel=2",
@@ -530,6 +596,7 @@
},
{
"BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"Filter": "occ_sel=3",
@@ -539,6 +606,7 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"PerPkg": "1",
@@ -547,6 +615,7 @@
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
"PerPkg": "1",
@@ -555,6 +624,7 @@
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -563,6 +633,7 @@
},
{
"BriefDescription": "Cycles Changing Voltage",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
"PerPkg": "1",
@@ -571,6 +642,7 @@
},
{
"BriefDescription": "Cycles Decreasing Voltage",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
"PerPkg": "1",
@@ -579,6 +651,7 @@
},
{
"BriefDescription": "Cycles Increasing Voltage",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
"PerPkg": "1",
@@ -587,6 +660,7 @@
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
index 410763dd4394..b9b70d8beb43 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.DEMAND_LD_WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.DEMAND_LD_WALK_DURATION",
"SampleAfterValue": "2000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Page walk for a large page completed for Demand load.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.LARGE_PAGE_WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts load operations that missed 1st level DTLB but hit the 2nd level.",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
@@ -54,6 +61,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
@@ -62,6 +70,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Miss in all TLB levels causes a page walk that completes of any page size (4K/2M/4M/1G).",
@@ -78,6 +88,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"PublicDescription": "Cycles PMH is busy with this walk.",
@@ -86,6 +97,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
@@ -93,6 +105,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
@@ -101,6 +114,7 @@
},
{
"BriefDescription": "Completed page walks in ITLB due to STLB load misses for large pages",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.LARGE_PAGE_WALK_COMPLETED",
"PublicDescription": "Completed page walks in ITLB due to STLB load misses for large pages.",
@@ -109,6 +123,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in all ITLB levels that cause page walks.",
@@ -117,6 +132,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Number of cache load STLB hits. No page walk.",
@@ -125,6 +141,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Misses in all ITLB levels that cause completed page walks.",
@@ -133,6 +150,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
"PublicDescription": "Cycle PMH is busy with a walk.",
@@ -141,6 +159,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "DTLB flush attempts of the thread-specific entries.",
@@ -149,6 +168,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Count number of STLB flush attempts.",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
index b9769d39940c..ab3713c469e3 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Allocated L1D data cache lines in M state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.ALLOCATED_IN_M",
"SampleAfterValue": "2000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.ALL_M_REPLACEMENT",
"SampleAfterValue": "2000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.EVICTION",
"SampleAfterValue": "2000003",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1D data line replacements.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xBF",
"EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "L1D miss outstanding duration in cycles.",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"SampleAfterValue": "2000003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -62,6 +70,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.ALL",
"SampleAfterValue": "200003",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_E",
"SampleAfterValue": "200003",
@@ -84,6 +95,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_M",
"SampleAfterValue": "200003",
@@ -91,6 +103,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_S",
"SampleAfterValue": "200003",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.MISS",
"SampleAfterValue": "200003",
@@ -105,6 +119,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
@@ -113,6 +128,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"SampleAfterValue": "100003",
@@ -120,6 +136,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"SampleAfterValue": "100003",
@@ -127,6 +144,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"SampleAfterValue": "100003",
@@ -134,6 +152,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100003",
@@ -141,6 +160,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100003",
@@ -148,6 +168,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines filling the L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DIRTY_ALL",
"SampleAfterValue": "100003",
@@ -155,6 +176,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PF_CLEAN",
"SampleAfterValue": "100003",
@@ -162,6 +184,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PF_DIRTY",
"SampleAfterValue": "100003",
@@ -169,6 +192,7 @@
},
{
"BriefDescription": "L2 code requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"SampleAfterValue": "200003",
@@ -176,6 +200,7 @@
},
{
"BriefDescription": "Demand Data Read requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"SampleAfterValue": "200003",
@@ -183,6 +208,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"SampleAfterValue": "200003",
@@ -190,6 +216,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"SampleAfterValue": "200003",
@@ -197,6 +224,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
@@ -204,6 +232,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
@@ -211,6 +240,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
@@ -218,6 +248,7 @@
},
{
"BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_HIT",
"SampleAfterValue": "200003",
@@ -225,6 +256,7 @@
},
{
"BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_MISS",
"SampleAfterValue": "200003",
@@ -232,6 +264,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
@@ -239,6 +272,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
@@ -246,6 +280,7 @@
},
{
"BriefDescription": "RFOs that access cache lines in any state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.ALL",
"SampleAfterValue": "200003",
@@ -253,6 +288,7 @@
},
{
"BriefDescription": "RFOs that hit cache lines in E state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
"SampleAfterValue": "200003",
@@ -260,6 +296,7 @@
},
{
"BriefDescription": "RFOs that hit cache lines in M state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
"SampleAfterValue": "200003",
@@ -267,6 +304,7 @@
},
{
"BriefDescription": "RFOs that miss cache lines.",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.MISS",
"SampleAfterValue": "200003",
@@ -274,6 +312,7 @@
},
{
"BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_PF",
"SampleAfterValue": "200003",
@@ -281,6 +320,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"SampleAfterValue": "200003",
@@ -288,6 +328,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.CODE_RD",
"SampleAfterValue": "200003",
@@ -295,6 +336,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"SampleAfterValue": "200003",
@@ -302,6 +344,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L1D_WB",
"SampleAfterValue": "200003",
@@ -309,6 +352,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_FILL",
"SampleAfterValue": "200003",
@@ -316,6 +360,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"SampleAfterValue": "200003",
@@ -323,6 +368,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.RFO",
"SampleAfterValue": "200003",
@@ -330,6 +376,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked.",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"SampleAfterValue": "2000003",
@@ -337,6 +384,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100003",
@@ -344,6 +392,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "100003",
@@ -351,6 +400,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
@@ -359,6 +409,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
@@ -367,6 +418,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
@@ -374,6 +426,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
@@ -381,6 +434,7 @@
},
{
"BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
@@ -388,6 +442,7 @@
},
{
"BriefDescription": "Data from remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
"SampleAfterValue": "100007",
@@ -395,6 +450,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -403,6 +459,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
@@ -411,6 +468,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
@@ -419,6 +477,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
@@ -427,6 +486,7 @@
},
{
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
"SampleAfterValue": "100007",
@@ -434,6 +494,7 @@
},
{
"BriefDescription": "All retired load uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
@@ -443,6 +504,7 @@
},
{
"BriefDescription": "All retired store uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
@@ -452,6 +514,7 @@
},
{
"BriefDescription": "Retired load uops with locked access.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
@@ -460,6 +523,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
@@ -469,6 +533,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
@@ -478,6 +543,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
@@ -486,6 +552,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
@@ -494,6 +561,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"SampleAfterValue": "100003",
@@ -501,6 +569,7 @@
},
{
"BriefDescription": "Cacheable and non-cacheable code read requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"SampleAfterValue": "100003",
@@ -508,6 +577,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -515,6 +585,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"SampleAfterValue": "100003",
@@ -522,6 +593,7 @@
},
{
"BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"SampleAfterValue": "2000003",
@@ -529,6 +601,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
@@ -536,6 +609,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -544,6 +618,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -552,6 +627,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -560,6 +636,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
@@ -567,6 +644,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
@@ -575,6 +653,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
@@ -582,6 +661,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -591,6 +671,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -600,6 +681,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -609,6 +691,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -618,6 +701,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -627,6 +711,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -636,6 +721,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -645,6 +731,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -654,6 +741,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -663,6 +751,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -672,6 +761,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -681,6 +771,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -690,6 +781,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -699,6 +791,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -708,6 +801,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -717,6 +811,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -726,6 +821,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch prefetch RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -735,6 +831,7 @@
},
{
"BriefDescription": "Counts all writebacks from the core to the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -744,6 +841,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -753,6 +851,7 @@
},
{
"BriefDescription": "Counts all demand code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -762,6 +861,7 @@
},
{
"BriefDescription": "Counts all demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -771,6 +871,7 @@
},
{
"BriefDescription": "Counts all demand data reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -780,6 +881,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -789,6 +891,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -798,6 +901,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -807,6 +911,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -816,6 +921,7 @@
},
{
"BriefDescription": "Counts all demand rfo's",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -825,6 +931,7 @@
},
{
"BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
"MSRIndex": "0x1a6,0x1a7",
@@ -834,6 +941,7 @@
},
{
"BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
"MSRIndex": "0x1a6,0x1a7",
@@ -843,6 +951,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -852,6 +961,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -861,6 +971,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -870,6 +981,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -879,6 +991,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -888,6 +1001,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -897,6 +1011,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -906,6 +1021,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -915,6 +1031,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -924,6 +1041,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -933,6 +1051,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -942,6 +1061,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -951,6 +1071,7 @@
},
{
"BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -960,6 +1081,7 @@
},
{
"BriefDescription": "Counts non-temporal stores",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -969,6 +1091,7 @@
},
{
"BriefDescription": "Split locks in SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/counter.json b/tools/perf/pmu-events/arch/x86/jaketown/counter.json
new file mode 100644
index 000000000000..fac24dfeb23f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/counter.json
@@ -0,0 +1,52 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "QPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "R3QPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "3"
+ },
+ {
+ "Unit": "R2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "HA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
index 79e8f403c426..8b570829e2e0 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles with any input/output SSE or FP assist.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -51,6 +58,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -58,6 +66,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,6 +74,7 @@
},
{
"BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULs and IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000003",
@@ -72,6 +82,7 @@
},
{
"BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_STORE",
"SampleAfterValue": "100003",
@@ -79,6 +90,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
@@ -86,6 +98,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
@@ -93,6 +106,7 @@
},
{
"BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -100,6 +114,7 @@
},
{
"BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_SINGLE",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
index 754ee2749485..3cb468da7011 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
"SampleAfterValue": "2000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "DSB_FILL.ALL_CANCEL",
"SampleAfterValue": "2000003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "DSB_FILL.EXCEED_DSB_LINES",
"SampleAfterValue": "2000003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "DSB_FILL.OTHER_CANCEL",
"SampleAfterValue": "2000003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
@@ -51,6 +58,7 @@
},
{
"BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
@@ -59,6 +67,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -75,6 +85,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -83,6 +94,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -91,6 +103,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -99,6 +112,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
@@ -106,6 +120,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
"SampleAfterValue": "2000003",
@@ -113,6 +128,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"SampleAfterValue": "2000003",
@@ -120,6 +136,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -128,6 +145,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"SampleAfterValue": "2000003",
@@ -135,6 +153,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -144,6 +163,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -152,6 +172,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -161,6 +182,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
@@ -168,6 +190,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"SampleAfterValue": "2000003",
@@ -175,6 +198,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -184,6 +208,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
@@ -191,6 +216,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
@@ -199,6 +225,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -207,6 +234,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -216,6 +244,7 @@
},
{
"BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
@@ -225,6 +254,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -233,6 +263,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -241,6 +272,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index fc8c3f785be1..f8c18741b360 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -73,7 +73,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -94,7 +94,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -124,7 +124,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
@@ -152,7 +152,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -226,7 +226,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -296,13 +296,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -419,7 +419,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -458,7 +458,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -468,7 +468,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_system_dram_bw_use",
@@ -477,7 +477,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: ",
@@ -525,7 +525,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/memory.json b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
index a71e630fd030..41200f0e0df6 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Loads with latency value being above 128.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Loads with latency value being above 16.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
@@ -29,6 +32,7 @@
},
{
"BriefDescription": "Loads with latency value being above 256.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
@@ -39,6 +43,7 @@
},
{
"BriefDescription": "Loads with latency value being above 32.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
@@ -49,6 +54,7 @@
},
{
"BriefDescription": "Loads with latency value being above 4 .",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
@@ -59,6 +65,7 @@
},
{
"BriefDescription": "Loads with latency value being above 512.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
@@ -69,6 +76,7 @@
},
{
"BriefDescription": "Loads with latency value being above 64.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
@@ -79,6 +87,7 @@
},
{
"BriefDescription": "Loads with latency value being above 8.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
@@ -89,6 +98,7 @@
},
{
"BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
"PEBS": "2",
@@ -97,6 +107,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"SampleAfterValue": "2000003",
@@ -104,6 +115,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"SampleAfterValue": "2000003",
@@ -111,6 +123,7 @@
},
{
"BriefDescription": "This event counts all LLC misses for all demand and L2 prefetches. LLC prefetches are excluded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -120,6 +133,7 @@
},
{
"BriefDescription": "Counts all local dram accesses for all demand and L2 prefetches. LLC prefetches are excluded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -129,6 +143,7 @@
},
{
"BriefDescription": "This event counts all remote cache-to-cache transfers (includes HITM and HIT-Forward) for all demand and L2 prefetches. LLC prefetches are excluded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.REMOTE_HITM_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -138,6 +153,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -147,6 +163,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -156,6 +173,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -165,6 +183,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -174,6 +193,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -183,6 +203,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -192,6 +213,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -201,6 +223,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -210,6 +233,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -219,6 +243,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -228,6 +253,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -237,6 +263,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -246,6 +273,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -255,6 +283,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -264,6 +293,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -273,6 +303,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -282,6 +313,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -291,6 +323,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -300,6 +333,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -309,6 +343,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json b/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json
index a2c27794c0d8..7dc7eb0d3dd3 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/other.json b/tools/perf/pmu-events/arch/x86/jaketown/other.json
index 9f96121baef8..42692fa24b6c 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/other.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"SampleAfterValue": "2000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"SampleAfterValue": "2000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "HW_PRE_REQ.DL1_MISS",
"SampleAfterValue": "2000003",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Valid instructions written to IQ per cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
"SampleAfterValue": "2000003",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
index d0edfdec9f01..ca0694c33de1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "AGU_BYPASS_CANCEL.COUNT",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Divide operations executed.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"SampleAfterValue": "2000003",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Speculative and retired branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"SampleAfterValue": "200003",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"SampleAfterValue": "200003",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -116,12 +132,14 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "400009"
},
{
"BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -138,6 +157,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
@@ -145,6 +165,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -153,6 +174,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -161,6 +183,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -169,6 +192,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
@@ -176,6 +200,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
@@ -183,6 +208,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -190,6 +216,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -197,6 +224,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -204,6 +232,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -212,6 +241,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -219,6 +249,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -226,6 +257,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -233,6 +265,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -240,6 +273,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -247,6 +281,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"SampleAfterValue": "200003",
@@ -254,12 +289,14 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "400009"
},
{
"BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -268,6 +305,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -276,6 +314,7 @@
},
{
"BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -284,6 +323,7 @@
},
{
"BriefDescription": "Mispredicted not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NOT_TAKEN",
"PEBS": "1",
@@ -292,6 +332,7 @@
},
{
"BriefDescription": "Mispredicted taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.TAKEN",
"PEBS": "1",
@@ -300,6 +341,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -307,6 +349,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
@@ -315,6 +358,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
@@ -322,6 +366,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -329,6 +374,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -336,6 +382,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
@@ -344,6 +391,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
@@ -351,6 +399,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -359,12 +408,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003"
@@ -372,12 +423,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "Counter": "2",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -386,6 +439,7 @@
},
{
"BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
@@ -394,6 +448,7 @@
},
{
"BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
@@ -402,6 +457,7 @@
},
{
"BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "Counter": "2",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -410,6 +466,7 @@
},
{
"BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
@@ -418,6 +475,7 @@
},
{
"BriefDescription": "Stall cycles because IQ is full.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000003",
@@ -425,6 +483,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000003",
@@ -432,6 +491,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
"SampleAfterValue": "2000003",
@@ -439,12 +499,14 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "Counter": "1",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "2",
@@ -453,6 +515,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RAT_STALL_CYCLES",
"SampleAfterValue": "2000003",
@@ -460,6 +523,7 @@
},
{
"BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -469,6 +533,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -477,6 +542,7 @@
},
{
"BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -486,6 +552,7 @@
},
{
"BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL_BLOCK",
"SampleAfterValue": "100003",
@@ -493,6 +560,7 @@
},
{
"BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"SampleAfterValue": "100003",
@@ -500,6 +568,7 @@
},
{
"BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"SampleAfterValue": "100003",
@@ -507,6 +576,7 @@
},
{
"BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
@@ -515,6 +585,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
@@ -523,6 +594,7 @@
},
{
"BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
"SampleAfterValue": "100003",
@@ -530,6 +602,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.HW_PF",
"SampleAfterValue": "100003",
@@ -537,6 +610,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
"SampleAfterValue": "100003",
@@ -544,6 +618,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -552,6 +627,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -560,6 +636,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
@@ -567,6 +644,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -576,6 +654,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
@@ -584,6 +663,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -592,6 +672,7 @@
},
{
"BriefDescription": "Retired instructions experiencing ITLB misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
"SampleAfterValue": "100003",
@@ -599,6 +680,7 @@
},
{
"BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
"SampleAfterValue": "2000003",
@@ -606,6 +688,7 @@
},
{
"BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
@@ -615,6 +698,7 @@
},
{
"BriefDescription": "Multiply packed/scalar single precision uops allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
"SampleAfterValue": "2000003",
@@ -622,6 +706,7 @@
},
{
"BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
"PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
@@ -630,6 +715,7 @@
},
{
"BriefDescription": "Resource-related stall cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000003",
@@ -637,6 +723,7 @@
},
{
"BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LB",
"SampleAfterValue": "2000003",
@@ -644,6 +731,7 @@
},
{
"BriefDescription": "Resource stalls due to load or store buffers all being in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LB_SB",
"SampleAfterValue": "2000003",
@@ -651,6 +739,7 @@
},
{
"BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.MEM_RS",
"SampleAfterValue": "2000003",
@@ -658,6 +747,7 @@
},
{
"BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.OOO_RSRC",
"SampleAfterValue": "2000003",
@@ -665,6 +755,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
@@ -672,6 +763,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
@@ -679,6 +771,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"SampleAfterValue": "2000003",
@@ -686,6 +779,7 @@
},
{
"BriefDescription": "Cycles with either free list is empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
"SampleAfterValue": "2000003",
@@ -693,6 +787,7 @@
},
{
"BriefDescription": "Resource stalls2 control structures full for physical registers.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
"SampleAfterValue": "2000003",
@@ -700,6 +795,7 @@
},
{
"BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "RESOURCE_STALLS2.BOB_FULL",
"SampleAfterValue": "2000003",
@@ -707,6 +803,7 @@
},
{
"BriefDescription": "Resource stalls out of order resources full.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "RESOURCE_STALLS2.OOO_RSRC",
"SampleAfterValue": "2000003",
@@ -714,6 +811,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
@@ -721,6 +819,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"SampleAfterValue": "2000003",
@@ -728,6 +827,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -738,6 +838,7 @@
},
{
"BriefDescription": "Uops dispatched from any thread.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_DISPATCHED.CORE",
"SampleAfterValue": "2000003",
@@ -745,6 +846,7 @@
},
{
"BriefDescription": "Uops dispatched per thread.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_DISPATCHED.THREAD",
"SampleAfterValue": "2000003",
@@ -752,6 +854,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"SampleAfterValue": "2000003",
@@ -760,6 +863,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
@@ -767,6 +871,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
@@ -775,6 +880,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
@@ -782,6 +888,7 @@
},
{
"BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
@@ -790,6 +897,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -797,6 +905,7 @@
},
{
"BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
@@ -805,6 +914,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
@@ -812,6 +922,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
@@ -820,6 +931,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
@@ -827,6 +939,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
@@ -835,6 +948,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
@@ -842,6 +956,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -850,6 +965,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -858,6 +974,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -866,6 +983,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -874,6 +992,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"Invert": "1",
@@ -882,6 +1001,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
@@ -891,6 +1011,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -900,6 +1021,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -909,6 +1031,7 @@
},
{
"BriefDescription": "Actually retired uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -918,6 +1041,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
@@ -927,6 +1051,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -936,6 +1061,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -945,6 +1071,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "10",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
index 63395e7ee0ce..8508becead5a 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
@@ -1,12 +1,14 @@
[
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBOX"
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_C_COUNTER0_OCCUPANCY",
"PerPkg": "1",
@@ -14,6 +16,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_C_ISMQ_DRD_MISS_OCC",
"PerPkg": "1",
@@ -21,6 +24,7 @@
},
{
"BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Cache Lookups; RTID",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.NID",
"PerPkg": "1",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
"PerPkg": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.WRITE",
"PerPkg": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.E_STATE",
"PerPkg": "1",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.MISS",
"PerPkg": "1",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in M state",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.NID",
"PerPkg": "1",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.S_STATE",
"PerPkg": "1",
@@ -102,6 +114,7 @@
},
{
"BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -111,6 +124,7 @@
},
{
"BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.RSPI_WAS_FSE",
"PerPkg": "1",
@@ -120,6 +134,7 @@
},
{
"BriefDescription": "Cbo Misc",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.STARTED",
"PerPkg": "1",
@@ -129,6 +144,7 @@
},
{
"BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_C_MISC.WC_ALIASING",
"PerPkg": "1",
@@ -138,6 +154,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Even",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -147,6 +164,7 @@
},
{
"BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
"PerPkg": "1",
@@ -156,6 +174,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.UP_EVEN",
"PerPkg": "1",
@@ -165,6 +184,7 @@
},
{
"BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "2,3",
"EventCode": "0x1b",
"EventName": "UNC_C_RING_AD_USED.UP_ODD",
"PerPkg": "1",
@@ -174,6 +194,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Even",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -183,6 +204,7 @@
},
{
"BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
"PerPkg": "1",
@@ -192,6 +214,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.UP_EVEN",
"PerPkg": "1",
@@ -201,6 +224,7 @@
},
{
"BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "2,3",
"EventCode": "0x1c",
"EventName": "UNC_C_RING_AK_USED.UP_ODD",
"PerPkg": "1",
@@ -210,6 +234,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Even",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
"PerPkg": "1",
@@ -219,6 +244,7 @@
},
{
"BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
"PerPkg": "1",
@@ -228,6 +254,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.UP_EVEN",
"PerPkg": "1",
@@ -237,6 +264,7 @@
},
{
"BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "2,3",
"EventCode": "0x1d",
"EventName": "UNC_C_RING_BL_USED.UP_ODD",
"PerPkg": "1",
@@ -246,6 +274,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.AK_CORE",
"PerPkg": "1",
@@ -254,6 +283,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.BL_CORE",
"PerPkg": "1",
@@ -262,6 +292,7 @@
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_C_RING_BOUNCES.IV_CORE",
"PerPkg": "1",
@@ -270,6 +301,7 @@
},
{
"BriefDescription": "BL Ring in Use; Any",
+ "Counter": "2,3",
"EventCode": "0x1e",
"EventName": "UNC_C_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -278,6 +310,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AD_CACHE",
"PerPkg": "1",
@@ -285,6 +318,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.AK_CORE",
"PerPkg": "1",
@@ -292,6 +326,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.BL_CORE",
"PerPkg": "1",
@@ -299,6 +334,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_C_RING_SINK_STARVED.IV_CORE",
"PerPkg": "1",
@@ -306,6 +342,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_C_RING_SRC_THRTL",
"PerPkg": "1",
@@ -313,6 +350,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
"PerPkg": "1",
@@ -322,6 +360,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
"PerPkg": "1",
@@ -331,6 +370,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.ISMQ",
"PerPkg": "1",
@@ -340,6 +380,7 @@
},
{
"BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
"PerPkg": "1",
@@ -349,6 +390,7 @@
},
{
"BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IPQ",
"PerPkg": "1",
@@ -358,6 +400,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ",
"PerPkg": "1",
@@ -367,6 +410,7 @@
},
{
"BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.IRQ_REJECTED",
"PerPkg": "1",
@@ -376,6 +420,7 @@
},
{
"BriefDescription": "Ingress Allocations; VFIFO",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_C_RxR_INSERTS.VFIFO",
"PerPkg": "1",
@@ -385,6 +430,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IPQ",
"PerPkg": "1",
@@ -394,6 +440,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.IRQ",
"PerPkg": "1",
@@ -403,6 +450,7 @@
},
{
"BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
"PerPkg": "1",
@@ -412,6 +460,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -421,6 +470,7 @@
},
{
"BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
"PerPkg": "1",
@@ -430,6 +480,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
"PerPkg": "1",
@@ -439,6 +490,7 @@
},
{
"BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -448,6 +500,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
"PerPkg": "1",
@@ -456,6 +509,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
"PerPkg": "1",
@@ -464,6 +518,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
"PerPkg": "1",
@@ -472,6 +527,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -480,6 +536,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
"PerPkg": "1",
@@ -488,6 +545,7 @@
},
{
"BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
"PerPkg": "1",
@@ -497,6 +555,7 @@
},
{
"BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
"PerPkg": "1",
@@ -506,6 +565,7 @@
},
{
"BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
"PerPkg": "1",
@@ -515,6 +575,7 @@
},
{
"BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
"PerPkg": "1",
@@ -524,6 +585,7 @@
},
{
"BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
"PerPkg": "1",
@@ -533,6 +595,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
"PerPkg": "1",
@@ -542,6 +605,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -551,6 +615,7 @@
},
{
"BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJECTED",
"PerPkg": "1",
@@ -560,6 +625,7 @@
},
{
"BriefDescription": "Ingress Occupancy; VFIFO",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_C_RxR_OCCUPANCY.VFIFO",
"PerPkg": "1",
@@ -569,6 +635,7 @@
},
{
"BriefDescription": "TOR Inserts; Evictions",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
@@ -578,6 +645,7 @@
},
{
"BriefDescription": "TOR Inserts; Miss All",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_ALL",
"PerPkg": "1",
@@ -587,6 +655,7 @@
},
{
"BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
@@ -596,6 +665,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
@@ -605,6 +675,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Evictions",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
@@ -614,6 +685,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Miss All",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
@@ -623,6 +695,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -632,6 +705,7 @@
},
{
"BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
@@ -641,6 +715,7 @@
},
{
"BriefDescription": "TOR Inserts; NID Matched Writebacks",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
@@ -650,6 +725,7 @@
},
{
"BriefDescription": "TOR Inserts; Opcode Match",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
@@ -659,6 +735,7 @@
},
{
"BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WB",
"PerPkg": "1",
@@ -668,6 +745,7 @@
},
{
"BriefDescription": "TOR Occupancy; Any",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -677,6 +755,7 @@
},
{
"BriefDescription": "TOR Occupancy; Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
"PerPkg": "1",
@@ -686,6 +765,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss All",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
"PerPkg": "1",
@@ -695,6 +775,7 @@
},
{
"BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
@@ -704,6 +785,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
"PerPkg": "1",
@@ -713,6 +795,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
"PerPkg": "1",
@@ -722,6 +805,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
"PerPkg": "1",
@@ -731,6 +815,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
"PerPkg": "1",
@@ -740,6 +825,7 @@
},
{
"BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
"PerPkg": "1",
@@ -749,6 +835,7 @@
},
{
"BriefDescription": "TOR Occupancy; Opcode Match",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
"PerPkg": "1",
@@ -757,6 +844,7 @@
"Unit": "CBOX"
},
{
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_C_TxR_ADS_USED",
"PerPkg": "1",
@@ -764,6 +852,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
"PerPkg": "1",
@@ -773,6 +862,7 @@
},
{
"BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AD_CORE",
"PerPkg": "1",
@@ -782,6 +872,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
"PerPkg": "1",
@@ -791,6 +882,7 @@
},
{
"BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.AK_CORE",
"PerPkg": "1",
@@ -800,6 +892,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
"PerPkg": "1",
@@ -809,6 +902,7 @@
},
{
"BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.BL_CORE",
"PerPkg": "1",
@@ -818,6 +912,7 @@
},
{
"BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
"PerPkg": "1",
@@ -827,6 +922,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.AK",
"PerPkg": "1",
@@ -836,6 +932,7 @@
},
{
"BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_C_TxR_STARVED.BL",
"PerPkg": "1",
@@ -845,6 +942,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
"PerPkg": "1",
@@ -854,6 +952,7 @@
},
{
"BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_BYPASS_IMC.TAKEN",
"PerPkg": "1",
@@ -863,6 +962,7 @@
},
{
"BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_H_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
@@ -870,6 +970,7 @@
},
{
"BriefDescription": "Conflict Checks; Conflict Detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_H_CONFLICT_CYCLES.CONFLICT",
"PerPkg": "1",
@@ -878,6 +979,7 @@
},
{
"BriefDescription": "Conflict Checks; No Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_H_CONFLICT_CYCLES.NO_CONFLICT",
"PerPkg": "1",
@@ -886,6 +988,7 @@
},
{
"BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_H_DIRECT2CORE_COUNT",
"PerPkg": "1",
@@ -894,6 +997,7 @@
},
{
"BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
"PerPkg": "1",
@@ -902,6 +1006,7 @@
},
{
"BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
"PerPkg": "1",
@@ -910,6 +1015,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
"PerPkg": "1",
@@ -919,6 +1025,7 @@
},
{
"BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
"PerPkg": "1",
@@ -928,6 +1035,7 @@
},
{
"BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -937,6 +1045,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
"PerPkg": "1",
@@ -946,6 +1055,7 @@
},
{
"BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_H_DIRECTORY_UPDATE.SET",
"PerPkg": "1",
@@ -955,6 +1065,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
"PerPkg": "1",
@@ -964,6 +1075,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
"PerPkg": "1",
@@ -973,6 +1085,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
"PerPkg": "1",
@@ -982,6 +1095,7 @@
},
{
"BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
"PerPkg": "1",
@@ -991,6 +1105,7 @@
},
{
"BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_H_IMC_RETRY",
"PerPkg": "1",
@@ -998,6 +1113,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -1007,6 +1123,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.FULL",
"PerPkg": "1",
@@ -1016,6 +1133,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
"PerPkg": "1",
@@ -1025,6 +1143,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -1034,6 +1153,7 @@
},
{
"BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
"PerPkg": "1",
@@ -1043,6 +1163,7 @@
},
{
"BriefDescription": "Read and Write Requests; Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
@@ -1052,6 +1173,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
@@ -1061,6 +1183,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -1070,6 +1193,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -1079,6 +1203,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -1088,6 +1213,7 @@
},
{
"BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_H_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -1097,6 +1223,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -1106,6 +1233,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -1115,6 +1243,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -1124,6 +1253,7 @@
},
{
"BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x3f",
"EventName": "UNC_H_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -1133,6 +1263,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -1142,6 +1273,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -1151,6 +1283,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -1160,6 +1293,7 @@
},
{
"BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_H_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -1169,6 +1303,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -1178,6 +1313,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -1187,6 +1323,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -1196,6 +1333,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -1205,6 +1343,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -1214,6 +1353,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -1223,6 +1363,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -1232,6 +1373,7 @@
},
{
"BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
@@ -1241,6 +1383,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
"PerPkg": "1",
@@ -1250,6 +1393,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
"PerPkg": "1",
@@ -1259,6 +1403,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
"PerPkg": "1",
@@ -1268,6 +1413,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
"PerPkg": "1",
@@ -1277,6 +1423,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
"PerPkg": "1",
@@ -1286,6 +1433,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
"PerPkg": "1",
@@ -1295,6 +1443,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
"PerPkg": "1",
@@ -1304,6 +1453,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
"PerPkg": "1",
@@ -1313,6 +1463,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
"PerPkg": "1",
@@ -1322,6 +1473,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
"PerPkg": "1",
@@ -1331,6 +1483,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
"PerPkg": "1",
@@ -1340,6 +1493,7 @@
},
{
"BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
"PerPkg": "1",
@@ -1349,6 +1503,7 @@
},
{
"BriefDescription": "Tracker Allocations; All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_H_TRACKER_INSERTS.ALL",
"PerPkg": "1",
@@ -1358,6 +1513,7 @@
},
{
"BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
"EventCode": "0xf",
"EventName": "UNC_H_TxR_AD.NDR",
"PerPkg": "1",
@@ -1367,6 +1523,7 @@
},
{
"BriefDescription": "Outbound NDR Ring Transactions; Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0xf",
"EventName": "UNC_H_TxR_AD.SNP",
"PerPkg": "1",
@@ -1376,6 +1533,7 @@
},
{
"BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -1384,6 +1542,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -1392,6 +1551,7 @@
},
{
"BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -1400,6 +1560,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -1408,6 +1569,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -1416,6 +1578,7 @@
},
{
"BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -1424,6 +1587,7 @@
},
{
"BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.ALL",
"PerPkg": "1",
@@ -1432,6 +1596,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
"PerPkg": "1",
@@ -1440,6 +1605,7 @@
},
{
"BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
"PerPkg": "1",
@@ -1448,6 +1614,7 @@
},
{
"BriefDescription": "AD Egress Occupancy; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_H_TxR_AD_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -1456,6 +1623,7 @@
},
{
"BriefDescription": "AD Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED0",
"PerPkg": "1",
@@ -1464,6 +1632,7 @@
},
{
"BriefDescription": "AD Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED1",
"PerPkg": "1",
@@ -1472,6 +1641,7 @@
},
{
"BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -1480,6 +1650,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -1488,6 +1659,7 @@
},
{
"BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -1496,6 +1668,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -1504,6 +1677,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -1512,6 +1686,7 @@
},
{
"BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -1520,6 +1695,7 @@
},
{
"BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_H_TxR_AK_INSERTS.ALL",
"PerPkg": "1",
@@ -1528,6 +1704,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
"PerPkg": "1",
@@ -1536,6 +1713,7 @@
},
{
"BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
"PerPkg": "1",
@@ -1544,6 +1722,7 @@
},
{
"BriefDescription": "Outbound NDR Ring Transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_H_TxR_AK_NDR",
"PerPkg": "1",
@@ -1552,6 +1731,7 @@
},
{
"BriefDescription": "AK Egress Occupancy; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_H_TxR_AK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -1560,6 +1740,7 @@
},
{
"BriefDescription": "AK Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED0",
"PerPkg": "1",
@@ -1568,6 +1749,7 @@
},
{
"BriefDescription": "AK Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED1",
"PerPkg": "1",
@@ -1576,6 +1758,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CACHE",
"PerPkg": "1",
@@ -1585,6 +1768,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_CORE",
"PerPkg": "1",
@@ -1594,6 +1778,7 @@
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_H_TxR_BL.DRS_QPI",
"PerPkg": "1",
@@ -1603,6 +1788,7 @@
},
{
"BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
"PerPkg": "1",
@@ -1611,6 +1797,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
"PerPkg": "1",
@@ -1619,6 +1806,7 @@
},
{
"BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
"PerPkg": "1",
@@ -1627,6 +1815,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -1635,6 +1824,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
"PerPkg": "1",
@@ -1643,6 +1833,7 @@
},
{
"BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
"PerPkg": "1",
@@ -1651,6 +1842,7 @@
},
{
"BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.ALL",
"PerPkg": "1",
@@ -1659,6 +1851,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
"PerPkg": "1",
@@ -1667,6 +1860,7 @@
},
{
"BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
"PerPkg": "1",
@@ -1675,6 +1869,7 @@
},
{
"BriefDescription": "BL Egress Occupancy; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_TxR_BL_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -1683,6 +1878,7 @@
},
{
"BriefDescription": "BL Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED0",
"PerPkg": "1",
@@ -1691,6 +1887,7 @@
},
{
"BriefDescription": "BL Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED1",
"PerPkg": "1",
@@ -1699,6 +1896,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
"PerPkg": "1",
@@ -1708,6 +1906,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
"PerPkg": "1",
@@ -1717,6 +1916,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
"PerPkg": "1",
@@ -1726,6 +1926,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
@@ -1735,6 +1936,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
"PerPkg": "1",
@@ -1744,6 +1946,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
"PerPkg": "1",
@@ -1753,6 +1956,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
@@ -1762,6 +1966,7 @@
},
{
"BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
index 0fc907e5cf3c..36b1946f06f2 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Address Match (Conflict) Count; Conflict Merges",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_ADDRESS_MATCH.MERGE_COUNT",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Address Match (Conflict) Count; Conflict Stalls",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_ADDRESS_MATCH.STALL_COUNT",
"PerPkg": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Write Ack Pending Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Write Ack Pending Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Outstanding Write Ownership Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_CACHE_OWN_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Outstanding Write Ownership Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_CACHE_OWN_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Outstanding Read Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_CACHE_READ_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "Outstanding Read Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_CACHE_READ_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -73,6 +81,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -82,6 +91,7 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -91,6 +101,7 @@
},
{
"BriefDescription": "Outstanding Write Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.ANY",
"PerPkg": "1",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "Outstanding Write Occupancy; Select Source",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.SOURCE",
"PerPkg": "1",
@@ -109,12 +121,14 @@
},
{
"BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Number of clocks in the IRP.",
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0xB",
"EventName": "UNC_I_RxR_AK_CYCLES_FULL",
"PerPkg": "1",
@@ -123,6 +137,7 @@
},
{
"BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
"EventCode": "0xA",
"EventName": "UNC_I_RxR_AK_INSERTS",
"PerPkg": "1",
@@ -130,6 +145,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "UNC_I_RxR_AK_OCCUPANCY",
"PerPkg": "1",
@@ -137,6 +153,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
@@ -145,6 +162,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
@@ -152,6 +170,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
@@ -159,6 +178,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
@@ -167,6 +187,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
@@ -174,6 +195,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
@@ -181,6 +203,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
@@ -189,6 +212,7 @@
},
{
"BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
@@ -196,6 +220,7 @@
"Unit": "IRP"
},
{
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
@@ -204,6 +229,7 @@
},
{
"BriefDescription": "Tickle Count; Ownership Lost",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TICKLES.LOST_OWNERSHIP",
"PerPkg": "1",
@@ -213,6 +239,7 @@
},
{
"BriefDescription": "Tickle Count; Data Returned",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TICKLES.TOP_OF_QUEUE",
"PerPkg": "1",
@@ -222,6 +249,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_TRANSACTIONS.PD_PREFETCHES",
"PerPkg": "1",
@@ -231,6 +259,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_TRANSACTIONS.READS",
"PerPkg": "1",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
"PerPkg": "1",
@@ -249,6 +279,7 @@
},
{
"BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -257,6 +288,7 @@
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
"PerPkg": "1",
@@ -265,6 +297,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xE",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
"PerPkg": "1",
@@ -273,6 +306,7 @@
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
"PerPkg": "1",
@@ -281,6 +315,7 @@
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0xD",
"EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
"PerPkg": "1",
@@ -289,6 +324,7 @@
},
{
"BriefDescription": "Write Ordering Stalls",
+ "Counter": "0,1",
"EventCode": "0x1A",
"EventName": "UNC_I_WRITE_ORDERING_STALL_CYCLES",
"PerPkg": "1",
@@ -297,6 +333,7 @@
},
{
"BriefDescription": "Number of qfclks",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
@@ -305,6 +342,7 @@
},
{
"BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_CTO_COUNT",
"PerPkg": "1",
@@ -313,6 +351,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
"PerPkg": "1",
@@ -322,6 +361,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
"PerPkg": "1",
@@ -331,6 +371,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Not Set",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT",
"PerPkg": "1",
@@ -340,6 +381,7 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.SUCCESS",
"PerPkg": "1",
@@ -349,6 +391,7 @@
},
{
"BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -357,6 +400,7 @@
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -365,6 +409,7 @@
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0xf",
"EventName": "UNC_Q_RxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -373,6 +418,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_BYPASSED",
"PerPkg": "1",
@@ -381,6 +427,7 @@
},
{
"BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
"PerPkg": "1",
@@ -390,6 +437,7 @@
},
{
"BriefDescription": "CRC Errors Detected; Normal Operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
"PerPkg": "1",
@@ -399,6 +447,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
"PerPkg": "1",
@@ -408,6 +457,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
"PerPkg": "1",
@@ -417,6 +467,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
"PerPkg": "1",
@@ -426,6 +477,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
"PerPkg": "1",
@@ -435,6 +487,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
"PerPkg": "1",
@@ -444,6 +497,7 @@
},
{
"BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
"PerPkg": "1",
@@ -453,6 +507,7 @@
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
"PerPkg": "1",
@@ -461,6 +516,7 @@
},
{
"BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_Q_RxL_CYCLES_NE",
"PerPkg": "1",
@@ -469,6 +525,7 @@
},
{
"BriefDescription": "Flits Received - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.DATA",
"PerPkg": "1",
@@ -478,6 +535,7 @@
},
{
"BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
"PerPkg": "1",
@@ -487,6 +545,7 @@
},
{
"BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
@@ -496,6 +555,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS",
"PerPkg": "1",
@@ -505,6 +565,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
@@ -514,6 +575,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
@@ -523,6 +585,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM",
"PerPkg": "1",
@@ -532,6 +595,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
@@ -541,6 +605,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
@@ -550,6 +615,7 @@
},
{
"BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.SNP",
"PerPkg": "1",
@@ -559,6 +625,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB",
"PerPkg": "1",
@@ -568,6 +635,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
@@ -577,6 +645,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
@@ -586,6 +655,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCS",
"PerPkg": "1",
@@ -595,6 +665,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
@@ -604,6 +675,7 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
@@ -613,6 +685,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_Q_RxL_INSERTS",
"PerPkg": "1",
@@ -621,6 +694,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS",
"PerPkg": "1",
@@ -629,6 +703,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_Q_RxL_INSERTS_HOM",
"PerPkg": "1",
@@ -637,6 +712,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_Q_RxL_INSERTS_NCB",
"PerPkg": "1",
@@ -645,6 +721,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_Q_RxL_INSERTS_NCS",
"PerPkg": "1",
@@ -653,6 +730,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_Q_RxL_INSERTS_NDR",
"PerPkg": "1",
@@ -661,6 +739,7 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_Q_RxL_INSERTS_SNP",
"PerPkg": "1",
@@ -669,6 +748,7 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_Q_RxL_OCCUPANCY",
"PerPkg": "1",
@@ -677,6 +757,7 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
"PerPkg": "1",
@@ -685,6 +766,7 @@
},
{
"BriefDescription": "RxQ Occupancy - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
"PerPkg": "1",
@@ -693,6 +775,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
"PerPkg": "1",
@@ -701,6 +784,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
"PerPkg": "1",
@@ -709,6 +793,7 @@
},
{
"BriefDescription": "RxQ Occupancy - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
"PerPkg": "1",
@@ -717,6 +802,7 @@
},
{
"BriefDescription": "RxQ Occupancy - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
"PerPkg": "1",
@@ -725,6 +811,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS.BGF_DRS",
"PerPkg": "1",
@@ -734,6 +821,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS.BGF_HOM",
"PerPkg": "1",
@@ -743,6 +831,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS.BGF_NCB",
"PerPkg": "1",
@@ -752,6 +841,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS.BGF_NCS",
"PerPkg": "1",
@@ -761,6 +851,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS.BGF_NDR",
"PerPkg": "1",
@@ -770,6 +861,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS.BGF_SNP",
"PerPkg": "1",
@@ -779,6 +871,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI; Egress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS.EGRESS_CREDITS",
"PerPkg": "1",
@@ -788,6 +881,7 @@
},
{
"BriefDescription": "Stalls Sending to R3QPI; GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS.GV",
"PerPkg": "1",
@@ -797,6 +891,7 @@
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -805,6 +900,7 @@
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_Q_TxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -813,6 +909,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_Q_TxL_BYPASSED",
"PerPkg": "1",
@@ -821,6 +918,7 @@
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
"PerPkg": "1",
@@ -830,6 +928,7 @@
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
"PerPkg": "1",
@@ -839,6 +938,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_Q_TxL_CYCLES_NE",
"PerPkg": "1",
@@ -847,6 +947,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
@@ -855,6 +956,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.IDLE",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
@@ -863,6 +965,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
@@ -871,6 +974,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
@@ -879,6 +983,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
@@ -887,6 +992,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
@@ -895,6 +1001,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
@@ -903,6 +1010,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
@@ -911,6 +1019,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
@@ -919,6 +1028,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
@@ -927,6 +1037,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB",
"PerPkg": "1",
@@ -936,6 +1047,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
@@ -945,6 +1057,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
@@ -954,6 +1067,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCS",
"PerPkg": "1",
@@ -963,6 +1077,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
@@ -972,6 +1087,7 @@
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
@@ -981,6 +1097,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_Q_TxL_INSERTS",
"PerPkg": "1",
@@ -989,6 +1106,7 @@
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_Q_TxL_OCCUPANCY",
"PerPkg": "1",
@@ -997,6 +1115,7 @@
},
{
"BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_Q_VNA_CREDIT_RETURNS",
"PerPkg": "1",
@@ -1005,6 +1124,7 @@
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
"PerPkg": "1",
@@ -1013,6 +1133,7 @@
},
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
"EventCode": "0x1",
"EventName": "UNC_R3_CLOCKTICKS",
"PerPkg": "1",
@@ -1021,6 +1142,7 @@
},
{
"BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.DRS",
"PerPkg": "1",
@@ -1030,6 +1152,7 @@
},
{
"BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.NCB",
"PerPkg": "1",
@@ -1039,6 +1162,7 @@
},
{
"BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.NCS",
"PerPkg": "1",
@@ -1048,6 +1172,7 @@
},
{
"BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_IIO_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -1057,6 +1182,7 @@
},
{
"BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_IIO_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -1066,6 +1192,7 @@
},
{
"BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_IIO_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -1075,6 +1202,7 @@
},
{
"BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_IIO_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -1084,6 +1212,7 @@
},
{
"BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_IIO_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -1093,6 +1222,7 @@
},
{
"BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_IIO_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -1102,6 +1232,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -1111,6 +1242,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -1120,6 +1252,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -1129,6 +1262,7 @@
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -1138,6 +1272,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -1147,6 +1282,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -1156,6 +1292,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -1165,6 +1302,7 @@
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -1174,6 +1312,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -1183,6 +1322,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -1192,6 +1332,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -1201,6 +1342,7 @@
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -1210,6 +1352,7 @@
},
{
"BriefDescription": "R3 IV Ring in Use; Any",
+ "Counter": "0,1,2",
"EventCode": "0xa",
"EventName": "UNC_R3_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -1219,6 +1362,7 @@
},
{
"BriefDescription": "Ingress Bypassed",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_R3_RxR_BYPASSED.AD",
"PerPkg": "1",
@@ -1228,6 +1372,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.DRS",
"PerPkg": "1",
@@ -1237,6 +1382,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
"PerPkg": "1",
@@ -1246,6 +1392,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.NCB",
"PerPkg": "1",
@@ -1255,6 +1402,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.NCS",
"PerPkg": "1",
@@ -1264,6 +1412,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
"PerPkg": "1",
@@ -1273,6 +1422,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
"PerPkg": "1",
@@ -1282,6 +1432,7 @@
},
{
"BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.DRS",
"PerPkg": "1",
@@ -1291,6 +1442,7 @@
},
{
"BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.HOM",
"PerPkg": "1",
@@ -1300,6 +1452,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCB",
"PerPkg": "1",
@@ -1309,6 +1462,7 @@
},
{
"BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCS",
"PerPkg": "1",
@@ -1318,6 +1472,7 @@
},
{
"BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NDR",
"PerPkg": "1",
@@ -1327,6 +1482,7 @@
},
{
"BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.SNP",
"PerPkg": "1",
@@ -1336,6 +1492,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.DRS",
"PerPkg": "1",
@@ -1345,6 +1502,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; HOM",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.HOM",
"PerPkg": "1",
@@ -1354,6 +1512,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; NCB",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.NCB",
"PerPkg": "1",
@@ -1363,6 +1522,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; NCS",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.NCS",
"PerPkg": "1",
@@ -1372,6 +1532,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; NDR",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.NDR",
"PerPkg": "1",
@@ -1381,6 +1542,7 @@
},
{
"BriefDescription": "Ingress Occupancy Accumulator; SNP",
+ "Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY.SNP",
"PerPkg": "1",
@@ -1390,6 +1552,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -1399,6 +1562,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -1408,6 +1572,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -1417,6 +1582,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -1426,6 +1592,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -1435,6 +1602,7 @@
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -1444,6 +1612,7 @@
},
{
"BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -1453,6 +1622,7 @@
},
{
"BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
"PerPkg": "1",
@@ -1462,6 +1632,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -1471,6 +1642,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -1480,6 +1652,7 @@
},
{
"BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
"PerPkg": "1",
@@ -1489,6 +1662,7 @@
},
{
"BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
"PerPkg": "1",
@@ -1498,6 +1672,7 @@
},
{
"BriefDescription": "VNA credit Acquisitions",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED",
"PerPkg": "1",
@@ -1506,6 +1681,7 @@
},
{
"BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -1515,6 +1691,7 @@
},
{
"BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
"PerPkg": "1",
@@ -1524,6 +1701,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -1533,6 +1711,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -1542,6 +1721,7 @@
},
{
"BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
"PerPkg": "1",
@@ -1551,6 +1731,7 @@
},
{
"BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
"PerPkg": "1",
@@ -1560,6 +1741,7 @@
},
{
"BriefDescription": "Cycles with no VNA credits available",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "UNC_R3_VNA_CREDIT_CYCLES_OUT",
"PerPkg": "1",
@@ -1568,6 +1750,7 @@
},
{
"BriefDescription": "Cycles with 1 or more VNA credits in use",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R3_VNA_CREDIT_CYCLES_USED",
"PerPkg": "1",
@@ -1575,12 +1758,14 @@
"Unit": "R3QPI"
},
{
+ "Counter": "0,1",
"EventName": "UNC_U_CLOCKTICKS",
"PerPkg": "1",
"Unit": "UBOX"
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
"PerPkg": "1",
@@ -1590,6 +1775,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.INT_PRIO",
"PerPkg": "1",
@@ -1599,6 +1785,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
"PerPkg": "1",
@@ -1608,6 +1795,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
"PerPkg": "1",
@@ -1617,6 +1805,7 @@
},
{
"BriefDescription": "VLW Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
"PerPkg": "1",
@@ -1626,6 +1815,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.DISABLE",
"PerPkg": "1",
@@ -1635,6 +1825,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.ENABLE",
"PerPkg": "1",
@@ -1644,6 +1835,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
"PerPkg": "1",
@@ -1653,6 +1845,7 @@
},
{
"BriefDescription": "Filter Match",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
"PerPkg": "1",
@@ -1662,6 +1855,7 @@
},
{
"BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
"EventCode": "0x44",
"EventName": "UNC_U_LOCK_CYCLES",
"PerPkg": "1",
@@ -1670,6 +1864,7 @@
},
{
"BriefDescription": "MsgCh Requests by Size; 4B Requests",
+ "Counter": "0,1",
"EventCode": "0x47",
"EventName": "UNC_U_MSG_CHNL_SIZE_COUNT.4B",
"PerPkg": "1",
@@ -1679,6 +1874,7 @@
},
{
"BriefDescription": "MsgCh Requests by Size; 8B Requests",
+ "Counter": "0,1",
"EventCode": "0x47",
"EventName": "UNC_U_MSG_CHNL_SIZE_COUNT.8B",
"PerPkg": "1",
@@ -1688,6 +1884,7 @@
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; ACK to Deassert",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ACK_TO_DEASSERT",
"PerPkg": "1",
@@ -1697,6 +1894,7 @@
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
"PerPkg": "1",
@@ -1706,6 +1904,7 @@
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS.COUNT",
"PerPkg": "1",
@@ -1714,6 +1913,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.CMC",
"PerPkg": "1",
@@ -1723,6 +1923,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
"PerPkg": "1",
@@ -1732,6 +1933,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LTERROR",
"PerPkg": "1",
@@ -1741,6 +1943,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
"PerPkg": "1",
@@ -1750,6 +1953,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
"PerPkg": "1",
@@ -1759,6 +1963,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.OTHER",
"PerPkg": "1",
@@ -1768,6 +1973,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.TRAP",
"PerPkg": "1",
@@ -1777,6 +1983,7 @@
},
{
"BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.UMC",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json
index b1ce5f77675e..c49f11aca14e 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_R2_CLOCKTICKS",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
"PerPkg": "1",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
"PerPkg": "1",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
"PerPkg": "1",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; DRS",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R2_IIO_CREDITS_REJECT.DRS",
"PerPkg": "1",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; NCB",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R2_IIO_CREDITS_REJECT.NCB",
"PerPkg": "1",
@@ -54,6 +60,7 @@
},
{
"BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; NCS",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R2_IIO_CREDITS_REJECT.NCS",
"PerPkg": "1",
@@ -63,6 +70,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
"PerPkg": "1",
@@ -72,6 +80,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
"PerPkg": "1",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
"EventCode": "0x32",
"EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
"PerPkg": "1",
@@ -90,6 +100,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
@@ -99,6 +110,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
@@ -108,6 +120,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
@@ -117,6 +130,7 @@
},
{
"BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_R2_RING_AD_USED.CW_ODD",
"PerPkg": "1",
@@ -126,6 +140,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
@@ -135,6 +150,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
@@ -144,6 +160,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
@@ -153,6 +170,7 @@
},
{
"BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_R2_RING_AK_USED.CW_ODD",
"PerPkg": "1",
@@ -162,6 +180,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
@@ -171,6 +190,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
@@ -180,6 +200,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
@@ -189,6 +210,7 @@
},
{
"BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_R2_RING_BL_USED.CW_ODD",
"PerPkg": "1",
@@ -198,6 +220,7 @@
},
{
"BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_R2_RING_IV_USED.ANY",
"PerPkg": "1",
@@ -207,6 +230,7 @@
},
{
"BriefDescription": "AK Ingress Bounced",
+ "Counter": "0",
"EventCode": "0x12",
"EventName": "UNC_R2_RxR_AK_BOUNCES",
"PerPkg": "1",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.DRS",
"PerPkg": "1",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
"PerPkg": "1",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
"PerPkg": "1",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AD",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
"PerPkg": "1",
@@ -251,6 +279,7 @@
},
{
"BriefDescription": "Egress Cycles Full; AK",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
"PerPkg": "1",
@@ -260,6 +289,7 @@
},
{
"BriefDescription": "Egress Cycles Full; BL",
+ "Counter": "0",
"EventCode": "0x25",
"EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
"PerPkg": "1",
@@ -269,6 +299,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AD",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AD",
"PerPkg": "1",
@@ -278,6 +309,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; AK",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.AK",
"PerPkg": "1",
@@ -287,6 +319,7 @@
},
{
"BriefDescription": "Egress Cycles Not Empty; BL",
+ "Counter": "0",
"EventCode": "0x23",
"EventName": "UNC_R2_TxR_CYCLES_NE.BL",
"PerPkg": "1",
@@ -296,6 +329,7 @@
},
{
"BriefDescription": "Egress NACK; AD",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACKS.AD",
"PerPkg": "1",
@@ -305,6 +339,7 @@
},
{
"BriefDescription": "Egress NACK; AK",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACKS.AK",
"PerPkg": "1",
@@ -314,6 +349,7 @@
},
{
"BriefDescription": "Egress NACK; BL",
+ "Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R2_TxR_NACKS.BL",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
index 6dcc9415a462..c94e22cdb535 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DRAM Activate Count",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
"PerPkg": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Uncore Fixed Counter - uclks",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
"PerPkg": "1",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
"PerPkg": "1",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
"PerPkg": "1",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
@@ -133,6 +149,7 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
"PerPkg": "1",
@@ -142,6 +159,7 @@
},
{
"BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
"PerPkg": "1",
@@ -150,6 +168,7 @@
},
{
"BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"PerPkg": "1",
@@ -158,6 +177,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
"PerPkg": "1",
@@ -167,6 +187,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
"PerPkg": "1",
@@ -176,6 +197,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
"PerPkg": "1",
@@ -185,6 +207,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
"PerPkg": "1",
@@ -194,6 +217,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
"PerPkg": "1",
@@ -203,6 +227,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
"PerPkg": "1",
@@ -212,6 +237,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
"PerPkg": "1",
@@ -221,6 +247,7 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
"PerPkg": "1",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
"PerPkg": "1",
@@ -238,6 +266,7 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"PerPkg": "1",
@@ -246,6 +275,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
"PerPkg": "1",
@@ -255,6 +285,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
"PerPkg": "1",
@@ -264,6 +295,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
"PerPkg": "1",
@@ -273,6 +305,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
"PerPkg": "1",
@@ -282,6 +315,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
"PerPkg": "1",
@@ -291,6 +325,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
"PerPkg": "1",
@@ -300,6 +335,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
"PerPkg": "1",
@@ -309,6 +345,7 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
"PerPkg": "1",
@@ -318,6 +355,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
"PerPkg": "1",
@@ -327,6 +365,7 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
"PerPkg": "1",
@@ -336,6 +375,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
"PerPkg": "1",
@@ -345,6 +385,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
@@ -354,6 +395,7 @@
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M_RPQ_CYCLES_FULL",
"PerPkg": "1",
@@ -362,6 +404,7 @@
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
"PerPkg": "1",
@@ -370,6 +413,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -378,6 +422,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
@@ -386,6 +431,7 @@
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
"PerPkg": "1",
@@ -394,6 +440,7 @@
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
"PerPkg": "1",
@@ -402,6 +449,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
@@ -410,6 +458,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
@@ -418,6 +467,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
"PerPkg": "1",
@@ -426,6 +476,7 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
index 6f98fc1728e6..1dffd2999d70 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
@@ -88,6 +99,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_P_DEMOTIONS_CORE3",
"PerPkg": "1",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_P_DEMOTIONS_CORE4",
"PerPkg": "1",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_P_DEMOTIONS_CORE5",
"PerPkg": "1",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_P_DEMOTIONS_CORE6",
"PerPkg": "1",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_P_DEMOTIONS_CORE7",
"PerPkg": "1",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_BAND0_CYCLES",
"PerPkg": "1",
@@ -144,6 +162,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_BAND1_CYCLES",
"PerPkg": "1",
@@ -152,6 +171,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_BAND2_CYCLES",
"PerPkg": "1",
@@ -160,6 +180,7 @@
},
{
"BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_BAND3_CYCLES",
"PerPkg": "1",
@@ -168,6 +189,7 @@
},
{
"BriefDescription": "Current Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
"PerPkg": "1",
@@ -176,6 +198,7 @@
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
"PerPkg": "1",
@@ -184,6 +207,7 @@
},
{
"BriefDescription": "OS Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
"PerPkg": "1",
@@ -192,6 +216,7 @@
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
"PerPkg": "1",
@@ -200,6 +225,7 @@
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
"PerPkg": "1",
@@ -208,6 +234,7 @@
},
{
"BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
"PerPkg": "1",
@@ -216,6 +243,7 @@
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
@@ -223,6 +251,7 @@
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
@@ -231,6 +260,7 @@
},
{
"BriefDescription": "Number of cores in C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"Filter": "occ_sel=1",
@@ -240,6 +270,7 @@
},
{
"BriefDescription": "Number of cores in C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"Filter": "occ_sel=2",
@@ -249,6 +280,7 @@
},
{
"BriefDescription": "Number of cores in C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"Filter": "occ_sel=3",
@@ -258,6 +290,7 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"PerPkg": "1",
@@ -266,6 +299,7 @@
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
"PerPkg": "1",
@@ -274,6 +308,7 @@
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
"PerPkg": "1",
@@ -282,6 +317,7 @@
},
{
"BriefDescription": "Cycles Changing Voltage",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
"PerPkg": "1",
@@ -290,6 +326,7 @@
},
{
"BriefDescription": "Cycles Decreasing Voltage",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
"PerPkg": "1",
@@ -298,6 +335,7 @@
},
{
"BriefDescription": "Cycles Increasing Voltage",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
"PerPkg": "1",
@@ -306,6 +344,7 @@
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
index fa08d355b97e..e0f6eb95455d 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -45,6 +51,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -52,6 +59,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
@@ -59,6 +67,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
@@ -66,6 +75,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"SampleAfterValue": "100007",
@@ -73,6 +83,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -80,6 +91,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -87,6 +99,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -94,6 +107,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
"PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"SampleAfterValue": "100007",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "STLB flush attempts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"SampleAfterValue": "100007",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
index 8da3a5a7be73..a9f905bc19d2 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -1,12 +1,14 @@
[
{
"BriefDescription": "Counts the number of MEC requests that were not accepted into the L2Q because of any L2 queue reject condition. There is no concept of at-ret here. It might include requests due to instructions in the speculative path.",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "CORE_REJECT_L2Q.ALL",
"SampleAfterValue": "200003"
},
{
"BriefDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.",
+ "Counter": "0,1",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
@@ -14,6 +16,7 @@
},
{
"BriefDescription": "Counts the number of L2HWP allocated into XQ GP",
+ "Counter": "0,1",
"EventCode": "0x3E",
"EventName": "L2_PREFETCHER.ALLOC_XQ",
"SampleAfterValue": "100007",
@@ -21,6 +24,7 @@
},
{
"BriefDescription": "Counts the number of L2 cache misses",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_REQUESTS.MISS",
"SampleAfterValue": "200003",
@@ -28,6 +32,7 @@
},
{
"BriefDescription": "Counts the total number of L2 cache references.",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "L2_REQUESTS.REFERENCE",
"SampleAfterValue": "200003",
@@ -35,12 +40,14 @@
},
{
"BriefDescription": "Counts the number of MEC requests from the L2Q that reference a cache line (cacheable requests) excluding SW prefetches filling only to L2 cache and L1 evictions (automatically excludes L2HWP, UC, WC) that were rejected - Multiple repeated rejects should be counted multiple times",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REQUESTS_REJECT.ALL",
"SampleAfterValue": "200003"
},
{
"BriefDescription": "Counts all the load micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PublicDescription": "This event counts the number of load micro-ops retired.",
@@ -49,6 +56,7 @@
},
{
"BriefDescription": "Counts all the store micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PublicDescription": "This event counts the number of store micro-ops retired.",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "Counts the loads retired that get the data from the other core in the same tile in M state (Precise Event)",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.HITM",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Counts the number of load micro-ops retired that miss in L1 D cache",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.L1_MISS_LOADS",
"PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
@@ -75,6 +85,7 @@
},
{
"BriefDescription": "Counts the number of load micro-ops retired that hit in the L2 (Precise Event)",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.L2_HIT_LOADS",
@@ -85,6 +96,7 @@
},
{
"BriefDescription": "Counts the number of load micro-ops retired that miss in the L2 (Precise Event)",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.L2_MISS_LOADS",
@@ -95,6 +107,7 @@
},
{
"BriefDescription": "Counts the number of load micro-ops retired that caused micro TLB miss",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.UTLB_MISS_LOADS",
"SampleAfterValue": "200003",
@@ -102,6 +115,7 @@
},
{
"BriefDescription": "Counts the matrix events specified by MSR_OFFCORE_RESPx",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100007",
@@ -109,6 +123,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -118,6 +133,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -127,6 +143,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -154,6 +173,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -163,6 +183,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -181,6 +203,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -190,6 +213,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -199,6 +223,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -208,6 +233,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -217,6 +243,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -226,6 +253,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -235,6 +263,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -244,6 +273,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -253,6 +283,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -262,6 +293,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -271,6 +303,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -280,6 +313,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -289,6 +323,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -298,6 +333,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -307,6 +343,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -316,6 +353,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -325,6 +363,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -334,6 +373,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -343,6 +383,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -352,6 +393,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -361,6 +403,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -370,6 +413,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -379,6 +423,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -388,6 +433,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -397,6 +443,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -406,6 +453,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -415,6 +463,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -424,6 +473,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -433,6 +483,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -442,6 +493,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -451,6 +503,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -460,6 +513,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -469,6 +523,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -478,6 +533,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -487,6 +543,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -496,6 +553,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -505,6 +563,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -514,6 +573,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -523,6 +583,7 @@
},
{
"BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -532,6 +593,7 @@
},
{
"BriefDescription": "Counts any request that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -541,6 +603,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -550,6 +613,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -559,6 +623,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -568,6 +633,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -577,6 +643,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -586,6 +653,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -595,6 +663,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -604,6 +673,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -613,6 +683,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -622,6 +693,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -631,6 +703,7 @@
},
{
"BriefDescription": "Accounts for responses which miss its own tile's L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -640,6 +713,7 @@
},
{
"BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -649,6 +723,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -658,6 +733,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -667,6 +743,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -676,6 +753,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -685,6 +763,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -694,6 +773,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -703,6 +783,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -712,6 +793,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -721,6 +803,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -730,6 +813,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -739,6 +823,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -748,6 +833,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -757,6 +843,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -766,6 +853,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -775,6 +863,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -784,6 +873,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -793,6 +883,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -802,6 +893,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -811,6 +903,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -820,6 +913,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -829,6 +923,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -838,6 +933,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -847,6 +943,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -856,6 +953,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -865,6 +963,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -874,6 +973,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -883,6 +983,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -892,6 +993,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -901,6 +1003,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -910,6 +1013,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -919,6 +1023,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -928,6 +1033,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -937,6 +1043,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -946,6 +1053,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -955,6 +1063,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -964,6 +1073,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -973,6 +1083,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -982,6 +1093,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -991,6 +1103,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1000,6 +1113,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1009,6 +1123,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1018,6 +1133,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -1027,6 +1143,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1036,6 +1153,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1045,6 +1163,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -1054,6 +1173,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -1063,6 +1183,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1072,6 +1193,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1081,6 +1203,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1090,6 +1213,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1099,6 +1223,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1108,6 +1233,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1117,6 +1243,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1126,6 +1253,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -1135,6 +1263,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1144,6 +1273,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1153,6 +1283,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -1162,6 +1293,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -1171,6 +1303,7 @@
},
{
"BriefDescription": "Counts Full streaming stores (WC and should be programmed on PMC1) that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1180,6 +1313,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1189,6 +1323,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1198,6 +1333,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1207,6 +1343,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1216,6 +1353,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1225,6 +1363,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1234,6 +1373,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1243,6 +1383,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -1252,6 +1393,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1261,6 +1403,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1270,6 +1413,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -1279,6 +1423,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -1288,6 +1433,7 @@
},
{
"BriefDescription": "Counts Partial streaming stores (WC and should be programmed on PMC1) that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a7",
@@ -1297,6 +1443,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
"MSRIndex": "0x1a7",
@@ -1306,6 +1453,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a7",
@@ -1315,6 +1463,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a7",
@@ -1324,6 +1473,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a7",
@@ -1333,6 +1483,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a7",
@@ -1342,6 +1493,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a7",
@@ -1351,6 +1503,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a7",
@@ -1360,6 +1513,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a7",
@@ -1369,6 +1523,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a7",
@@ -1378,6 +1533,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a7",
@@ -1387,6 +1543,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a7",
@@ -1396,6 +1553,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1405,6 +1563,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1414,6 +1573,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1423,6 +1583,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1432,6 +1593,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1441,6 +1603,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1450,6 +1613,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1459,6 +1623,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -1468,6 +1633,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1477,6 +1643,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1486,6 +1653,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -1495,6 +1663,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -1504,6 +1673,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1513,6 +1683,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1522,6 +1693,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1531,6 +1703,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1540,6 +1713,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1549,6 +1723,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1558,6 +1733,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1567,6 +1743,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -1576,6 +1753,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1585,6 +1763,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -1594,6 +1773,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1603,6 +1783,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1612,6 +1793,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1621,6 +1803,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1630,6 +1813,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1639,6 +1823,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1648,6 +1833,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -1657,6 +1843,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1666,6 +1853,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1675,6 +1863,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -1684,6 +1873,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1693,6 +1883,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1702,6 +1893,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1711,6 +1903,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1720,6 +1913,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1729,6 +1923,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1738,6 +1933,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1747,6 +1943,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -1756,6 +1953,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1765,6 +1963,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1774,6 +1973,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -1783,6 +1983,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -1792,6 +1993,7 @@
},
{
"BriefDescription": "Counts all streaming stores (WC and should be programmed on PMC1) that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a7",
@@ -1801,6 +2003,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for any response",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1810,6 +2013,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1819,6 +2023,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1828,6 +2033,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1837,6 +2043,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1846,6 +2053,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1855,6 +2063,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
@@ -1864,6 +2073,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
@@ -1873,6 +2083,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -1882,6 +2093,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
@@ -1891,6 +2103,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
"MSRIndex": "0x1a6",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/counter.json b/tools/perf/pmu-events/arch/x86/knightslanding/counter.json
new file mode 100644
index 000000000000..4ce9f30a4fe5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/counter.json
@@ -0,0 +1,37 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "EDC_ECLK",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "EDC_UCLK",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "iMC_DCLK",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "iMC_UCLK",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "M2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
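The counter.json added above records how many fixed and generic counters each Knights Landing PMU unit exposes (3 fixed and 2 generic for the core, 4 generic for each uncore unit). A hedged consistency sketch, assuming it is run from the root of a kernel tree that already contains this patch, cross-checks the per-event "Counter" lists against the core unit's generic-counter count:

import json

ARCH = "tools/perf/pmu-events/arch/x86/knightslanding"  # paths as used in this diff

with open(f"{ARCH}/counter.json") as f:
    core_generic = next(int(u["CountersNumGeneric"])
                        for u in json.load(f) if u["Unit"] == "core")

with open(f"{ARCH}/memory.json") as f:  # any of the per-event files works
    for ev in json.load(f):
        used = [int(c) for c in ev.get("Counter", "").split(",") if c]
        assert all(c < core_generic for c in used), ev["EventName"]

At build time these files are consumed by perf's jevents step; the sketch above only illustrates how the new fields relate.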
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json b/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
index 089aa3ef345d..29c0ff23957a 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of floating operations retired that required microcode assists",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "This event counts the number of times that the pipeline stalled due to FP operations needing assists.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of packed SSE, AVX, AVX2, AVX-512 micro-ops (both floating point and integer) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies.",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.PACKED_SIMD",
"PublicDescription": "The length of the packed operation (128bits, 256bits or 512bits) is not taken into account when updating the counter; all count the same (+1). \r\nMask (k) registers are ignored. For example: a micro-op operating with a mask that only enables one element or even zero elements will still trigger this counter (+1)\r\nThis event is defined at the micro-op level and not instruction level. Most instructions are implemented with one micro-op but not all.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops except for loads (memory-to-register mov-type micro ops), division, sqrt.",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.SCALAR_SIMD",
"PublicDescription": "This event is defined at the micro-op level and not instruction level. Most instructions are implemented with one micro-op but not all.",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json b/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
index 9001f5019848..63343a0d1e86 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of times the front end resteers for any branch as a result of another branch handling mechanism in the front end.",
+ "Counter": "0,1",
"EventCode": "0xE6",
"EventName": "BACLEARS.ALL",
"SampleAfterValue": "200003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of times the front end resteers for conditional branches as a result of another branch handling mechanism in the front end.",
+ "Counter": "0,1",
"EventCode": "0xE6",
"EventName": "BACLEARS.COND",
"SampleAfterValue": "200003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts the number of times the front end resteers for RET branches as a result of another branch handling mechanism in the front end.",
+ "Counter": "0,1",
"EventCode": "0xE6",
"EventName": "BACLEARS.RETURN",
"SampleAfterValue": "200003",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "Counts all instruction fetches, including uncacheable fetches.",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"SampleAfterValue": "200003",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "Counts all instruction fetches that hit the instruction cache.",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"SampleAfterValue": "200003",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "Counts all instruction fetches that miss the instruction cache or produce memory requests. An instruction fetch miss is counted only once and not once for every cycle it is outstanding.",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"SampleAfterValue": "200003",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "Counts the number of times the MSROM starts a flow of uops.",
+ "Counter": "0,1",
"EventCode": "0xE7",
"EventName": "MS_DECODED.MS_ENTRY",
"SampleAfterValue": "200003",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
index b0361f6f0dd9..7e4518986bb9 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of times the machine clears due to memory ordering hazards",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "200003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "Counts any request that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "Counts any request that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "Counts any request that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -251,6 +279,7 @@
},
{
"BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -260,6 +289,7 @@
},
{
"BriefDescription": "Counts any request that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -269,6 +299,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -278,6 +309,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -287,6 +319,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -296,6 +329,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -305,6 +339,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -314,6 +349,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -323,6 +359,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -332,6 +369,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -341,6 +379,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -350,6 +389,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -359,6 +399,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -368,6 +409,7 @@
},
{
"BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -377,6 +419,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -386,6 +429,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -395,6 +439,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -404,6 +449,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -413,6 +459,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -422,6 +469,7 @@
},
{
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -431,6 +479,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -440,6 +489,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -449,6 +499,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -458,6 +509,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -467,6 +519,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -476,6 +529,7 @@
},
{
"BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -485,6 +539,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -494,6 +549,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -503,6 +559,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -512,6 +569,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -521,6 +579,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -530,6 +589,7 @@
},
{
"BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -539,6 +599,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -548,6 +609,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -557,6 +619,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -566,6 +629,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -575,6 +639,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -584,6 +649,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -593,6 +659,7 @@
},
{
"BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from any NON_DRAM system address. This includes MMIO transactions",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -602,6 +669,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_FAR",
"MSRIndex": "0x1a7",
@@ -611,6 +679,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_NEAR",
"MSRIndex": "0x1a7",
@@ -620,6 +689,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM",
"MSRIndex": "0x1a7",
@@ -629,6 +699,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_FAR",
"MSRIndex": "0x1a7",
@@ -638,6 +709,7 @@
},
{
"BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_NEAR",
"MSRIndex": "0x1a7",
@@ -647,6 +719,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -656,6 +729,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -665,6 +739,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -674,6 +749,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -683,6 +759,7 @@
},
{
"BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -692,6 +769,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -701,6 +779,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -710,6 +789,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -719,6 +799,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -728,6 +809,7 @@
},
{
"BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -737,6 +819,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -746,6 +829,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -755,6 +839,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -764,6 +849,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -773,6 +859,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -782,6 +869,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -791,6 +879,7 @@
},
{
"BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from any NON_DRAM system address. This includes MMIO transactions",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -800,6 +889,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -809,6 +899,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -818,6 +909,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -827,6 +919,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -836,6 +929,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -845,6 +939,7 @@
},
{
"BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -854,6 +949,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from DDR (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR",
"MSRIndex": "0x1a6,0x1a7",
@@ -863,6 +959,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -872,6 +969,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -881,6 +979,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from MCDRAM (local and far)",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -890,6 +989,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
@@ -899,6 +999,7 @@
},
{
"BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Local.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
index 5b2e71750976..37d679ed8061 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of branch instructions retired (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of far branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "1",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Counts the number of branch instructions retired that were conditional jumps. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "1",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Counts the number of branch instructions retired that were near indirect CALL or near indirect JMP. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Counts the number of near relative CALL branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
@@ -56,6 +63,7 @@
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "1",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Counts the number of branch instructions retired that were conditional jumps and predicted taken. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted branch instructions retired (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -79,6 +89,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near CALL branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CALL",
"PEBS": "1",
@@ -87,6 +98,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted far branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -95,6 +107,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
@@ -103,6 +116,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "1",
@@ -111,6 +125,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted branch instructions retired that were near indirect CALL or near indirect JMP. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -119,6 +134,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near relative CALL branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.REL_CALL",
"PEBS": "1",
@@ -127,6 +143,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
@@ -135,6 +152,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps and predicted taken. (Precise Event)",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -143,6 +161,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles",
+ "Counter": "0,1",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000003",
@@ -150,12 +169,14 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"UMask": "0x3"
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter",
"SampleAfterValue": "2000003",
@@ -163,12 +184,14 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles",
+ "Counter": "0,1",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider.",
+ "Counter": "0,1",
"EventCode": "0xCD",
"EventName": "CYCLES_DIV_BUSY.ALL",
"PublicDescription": "This event counts cycles when the divider is busy. More specifically cycles when the divide unit is unable to accept a new divide uop because it is busy processing a previously dispatched uop. The cycles will be counted irrespective of whether or not another divide uop is waiting to enter the divide unit (from the RS). This event counts integer divides, x87 divides, divss, divsd, sqrtss, sqrtsd event and does not count vector divides.",
@@ -177,6 +200,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.",
"SampleAfterValue": "2000003",
@@ -184,12 +208,14 @@
},
{
"BriefDescription": "Counts the total number of instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Counts the number of instructions retired (Precise Event)",
+ "Counter": "0",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_PS",
"PEBS": "2",
@@ -197,6 +223,7 @@
},
{
"BriefDescription": "Counts all machine clears",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.ALL",
"SampleAfterValue": "200003",
@@ -204,6 +231,7 @@
},
{
"BriefDescription": "Counts the number of times that the machine clears due to program modifying data within 1K of a recently fetched code page",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "200003",
@@ -211,6 +239,7 @@
},
{
"BriefDescription": "Counts the total number of core cycles when no micro-ops are allocated for any reason.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.ALL",
"SampleAfterValue": "200003",
@@ -218,6 +247,7 @@
},
{
"BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and the alloc pipe is stalled waiting for a mispredicted branch to retire.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
"PublicDescription": "This event counts the number of core cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted branch to retire.",
@@ -226,6 +256,7 @@
},
{
"BriefDescription": "Counts the number of core cycles when no micro-ops are allocated, the IQ is empty, and no other condition is blocking allocation.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.NOT_DELIVERED",
"PublicDescription": "This event counts the number of core cycles when no uops are allocated, the instruction queue is empty and the alloc pipe is stalled waiting for instructions to be fetched.",
@@ -234,6 +265,7 @@
},
{
"BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.RAT_STALL",
"SampleAfterValue": "200003",
@@ -241,6 +273,7 @@
},
{
"BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and the ROB is full",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.ROB_FULL",
"SampleAfterValue": "200003",
@@ -248,6 +281,7 @@
},
{
"BriefDescription": "Counts any retired load that was pushed into the recycle queue for any reason.",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.ANY_LD",
"SampleAfterValue": "200003",
@@ -255,6 +289,7 @@
},
{
"BriefDescription": "Counts any retired store that was pushed into the recycle queue for any reason.",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.ANY_ST",
"SampleAfterValue": "200003",
@@ -262,6 +297,7 @@
},
{
"BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address overlaps with a store whose data is not ready",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.LD_BLOCK_STD_NOTREADY",
"SampleAfterValue": "200003",
@@ -269,6 +305,7 @@
},
{
"BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address partially overlaps with a store (Precise Event)",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD",
@@ -279,6 +316,7 @@
},
{
"BriefDescription": "Counts the number of occurrences a retired load was pushed into the rehab queue because it sees a cache line split. Each split should be counted only once. (Precise Event)",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.LD_SPLITS",
@@ -289,6 +327,7 @@
},
{
"BriefDescription": "Counts all the retired locked loads. It does not include stores because we would double count if we count stores",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.LOCK",
"SampleAfterValue": "200003",
@@ -296,6 +335,7 @@
},
{
"BriefDescription": "Counts the store micro-ops retired that were pushed in the rehab queue because the store address buffer is full",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.STA_FULL",
"SampleAfterValue": "200003",
@@ -303,6 +343,7 @@
},
{
"BriefDescription": "Counts the number of occurrences a retired store that is a cache line split. Each split should be counted only once.",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "RECYCLEQ.ST_SPLITS",
"PublicDescription": "This event counts the number of retired store that experienced a cache line boundary split(Precise Event). Note that each spilt should be counted only once.",
@@ -311,6 +352,7 @@
},
{
"BriefDescription": "Counts the total number of core cycles allocation pipeline is stalled when any one of the reservation stations is full.",
+ "Counter": "0,1",
"EventCode": "0xCB",
"EventName": "RS_FULL_STALL.ALL",
"SampleAfterValue": "200003",
@@ -318,6 +360,7 @@
},
{
"BriefDescription": "Counts the number of core cycles when allocation pipeline is stalled and is waiting for a free MEC reservation station entry.",
+ "Counter": "0,1",
"EventCode": "0xCB",
"EventName": "RS_FULL_STALL.MEC",
"SampleAfterValue": "200003",
@@ -325,6 +368,7 @@
},
{
"BriefDescription": "Counts the number of micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists.",
@@ -333,6 +377,7 @@
},
{
"BriefDescription": "Counts the number of micro-ops retired that are from the complex flows issued by the micro-sequencer (MS).",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MS",
"PublicDescription": "This event counts the number of micro-ops retired that were supplied from MSROM.",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json
index 120e4813d82a..1550b6457965 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ or PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
"PerPkg": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
"PerPkg": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR0",
"PerPkg": "1",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR1",
"PerPkg": "1",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR2",
"PerPkg": "1",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR3",
"PerPkg": "1",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR4",
"PerPkg": "1",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR5",
"PerPkg": "1",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR6",
"PerPkg": "1",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR7",
"PerPkg": "1",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED_EXT.TGR8",
"PerPkg": "1",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR0",
"PerPkg": "1",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR1",
"PerPkg": "1",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR2",
"PerPkg": "1",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR3",
"PerPkg": "1",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR4",
"PerPkg": "1",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR5",
"PerPkg": "1",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR6",
"PerPkg": "1",
@@ -193,6 +217,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR7",
"PerPkg": "1",
@@ -201,6 +226,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -209,6 +235,7 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY_EXT.TGR8",
"PerPkg": "1",
@@ -217,6 +244,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR0",
"PerPkg": "1",
@@ -225,6 +253,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR1",
"PerPkg": "1",
@@ -233,6 +262,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR2",
"PerPkg": "1",
@@ -241,6 +271,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR3",
"PerPkg": "1",
@@ -249,6 +280,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR4",
"PerPkg": "1",
@@ -257,6 +289,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR5",
"PerPkg": "1",
@@ -265,6 +298,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR6",
"PerPkg": "1",
@@ -273,6 +307,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR7",
"PerPkg": "1",
@@ -281,6 +316,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -289,6 +325,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED_EXT.TGR8",
"PerPkg": "1",
@@ -297,6 +334,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR0",
"PerPkg": "1",
@@ -305,6 +343,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR1",
"PerPkg": "1",
@@ -313,6 +352,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR2",
"PerPkg": "1",
@@ -321,6 +361,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR3",
"PerPkg": "1",
@@ -329,6 +370,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR4",
"PerPkg": "1",
@@ -337,6 +379,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR5",
"PerPkg": "1",
@@ -345,6 +388,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR6",
"PerPkg": "1",
@@ -353,6 +397,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR7",
"PerPkg": "1",
@@ -361,6 +406,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -369,6 +415,7 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY_EXT.TGR8",
"PerPkg": "1",
@@ -377,6 +424,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR0",
"PerPkg": "1",
@@ -385,6 +433,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR1",
"PerPkg": "1",
@@ -393,6 +442,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR2",
"PerPkg": "1",
@@ -401,6 +451,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR3",
"PerPkg": "1",
@@ -409,6 +460,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR4",
"PerPkg": "1",
@@ -417,6 +469,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR5",
"PerPkg": "1",
@@ -425,6 +478,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR6",
"PerPkg": "1",
@@ -433,6 +487,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR7",
"PerPkg": "1",
@@ -441,6 +496,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -449,6 +505,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.TGR8",
"PerPkg": "1",
@@ -457,6 +514,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR0",
"PerPkg": "1",
@@ -465,6 +523,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR1",
"PerPkg": "1",
@@ -473,6 +532,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR2",
"PerPkg": "1",
@@ -481,6 +541,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR3",
"PerPkg": "1",
@@ -489,6 +550,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR4",
"PerPkg": "1",
@@ -497,6 +559,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR5",
"PerPkg": "1",
@@ -505,6 +568,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR6",
"PerPkg": "1",
@@ -513,6 +577,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR7",
"PerPkg": "1",
@@ -521,6 +586,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -529,6 +595,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.TGR8",
"PerPkg": "1",
@@ -537,6 +604,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR0",
"PerPkg": "1",
@@ -545,6 +613,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR1",
"PerPkg": "1",
@@ -553,6 +622,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR2",
"PerPkg": "1",
@@ -561,6 +631,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR3",
"PerPkg": "1",
@@ -569,6 +640,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR4",
"PerPkg": "1",
@@ -577,6 +649,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR5",
"PerPkg": "1",
@@ -585,6 +658,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR6",
"PerPkg": "1",
@@ -593,6 +667,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR7",
"PerPkg": "1",
@@ -601,6 +676,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -609,6 +685,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED_EXT.TGR8",
"PerPkg": "1",
@@ -617,6 +694,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR0",
"PerPkg": "1",
@@ -625,6 +703,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR1",
"PerPkg": "1",
@@ -633,6 +712,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR2",
"PerPkg": "1",
@@ -641,6 +721,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR3",
"PerPkg": "1",
@@ -649,6 +730,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR4",
"PerPkg": "1",
@@ -657,6 +739,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR5",
"PerPkg": "1",
@@ -665,6 +748,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR6",
"PerPkg": "1",
@@ -673,6 +757,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR7",
"PerPkg": "1",
@@ -681,6 +766,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -689,6 +775,7 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY_EXT.TGR8",
"PerPkg": "1",
@@ -697,6 +784,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR0",
"PerPkg": "1",
@@ -705,6 +793,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR1",
"PerPkg": "1",
@@ -713,6 +802,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR2",
"PerPkg": "1",
@@ -721,6 +811,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR3",
"PerPkg": "1",
@@ -729,6 +820,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR4",
"PerPkg": "1",
@@ -737,6 +829,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR5",
"PerPkg": "1",
@@ -745,6 +838,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR6",
"PerPkg": "1",
@@ -753,6 +847,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR7",
"PerPkg": "1",
@@ -761,6 +856,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -769,6 +865,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_H_AG1_BL_CRD_ACQUIRED_EXT.TGR8",
"PerPkg": "1",
@@ -777,6 +874,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR0",
"PerPkg": "1",
@@ -785,6 +883,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR1",
"PerPkg": "1",
@@ -793,6 +892,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR2",
"PerPkg": "1",
@@ -801,6 +901,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR3",
"PerPkg": "1",
@@ -809,6 +910,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR4",
"PerPkg": "1",
@@ -817,6 +919,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR5",
"PerPkg": "1",
@@ -825,6 +928,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR6",
"PerPkg": "1",
@@ -833,6 +937,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR7",
"PerPkg": "1",
@@ -841,6 +946,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -849,6 +955,7 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY_EXT.TGR8",
"PerPkg": "1",
@@ -857,6 +964,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR0",
"PerPkg": "1",
@@ -865,6 +973,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR1",
"PerPkg": "1",
@@ -873,6 +982,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR2",
"PerPkg": "1",
@@ -881,6 +991,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR3",
"PerPkg": "1",
@@ -889,6 +1000,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR4",
"PerPkg": "1",
@@ -897,6 +1009,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR5",
"PerPkg": "1",
@@ -905,6 +1018,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR6",
"PerPkg": "1",
@@ -913,6 +1027,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR7",
"PerPkg": "1",
@@ -921,6 +1036,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -929,6 +1045,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.TGR8",
"PerPkg": "1",
@@ -937,6 +1054,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR0",
"PerPkg": "1",
@@ -945,6 +1063,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR1",
"PerPkg": "1",
@@ -953,6 +1072,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR2",
"PerPkg": "1",
@@ -961,6 +1081,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR3",
"PerPkg": "1",
@@ -969,6 +1090,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR4",
"PerPkg": "1",
@@ -977,6 +1099,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR5",
"PerPkg": "1",
@@ -985,6 +1108,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR6",
"PerPkg": "1",
@@ -993,6 +1117,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR7",
"PerPkg": "1",
@@ -1001,6 +1126,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.ANY_OF_TGR0_THRU_TGR7",
"PerPkg": "1",
@@ -1009,6 +1135,7 @@
},
{
"BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.TGR8",
"PerPkg": "1",
@@ -1017,6 +1144,7 @@
},
{
"BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_H_CACHE_LINES_VICTIMIZED.E_STATE",
"PerPkg": "1",
@@ -1025,6 +1153,7 @@
},
{
"BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_H_CACHE_LINES_VICTIMIZED.F_STATE",
"PerPkg": "1",
@@ -1033,6 +1162,7 @@
},
{
"BriefDescription": "Lines Victimized that Match NID",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_H_CACHE_LINES_VICTIMIZED.LOCAL",
"PerPkg": "1",
@@ -1041,6 +1171,7 @@
},
{
"BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Read transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_H_CACHE_LINES_VICTIMIZED.M_STATE",
"PerPkg": "1",
@@ -1049,6 +1180,7 @@
},
{
"BriefDescription": "Lines Victimized that Does Not Match NID",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_H_CACHE_LINES_VICTIMIZED.REMOTE",
"PerPkg": "1",
@@ -1057,6 +1189,7 @@
},
{
"BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_H_CACHE_LINES_VICTIMIZED.S_STATE",
"PerPkg": "1",
@@ -1065,6 +1198,7 @@
},
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_H_CLOCK",
"PerPkg": "1",
@@ -1072,6 +1206,7 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_H_EGRESS_HORZ_ADS_USED.AD",
"PerPkg": "1",
@@ -1080,6 +1215,7 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_H_EGRESS_HORZ_ADS_USED.AK",
"PerPkg": "1",
@@ -1088,6 +1224,7 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_H_EGRESS_HORZ_ADS_USED.BL",
"PerPkg": "1",
@@ -1096,6 +1233,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Bypass. AD ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_H_EGRESS_HORZ_BYPASS.AD",
"PerPkg": "1",
@@ -1104,6 +1242,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Bypass. AK ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_H_EGRESS_HORZ_BYPASS.AK",
"PerPkg": "1",
@@ -1112,6 +1251,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Bypass. BL ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_H_EGRESS_HORZ_BYPASS.BL",
"PerPkg": "1",
@@ -1120,6 +1260,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Bypass. IV ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_H_EGRESS_HORZ_BYPASS.IV",
"PerPkg": "1",
@@ -1128,6 +1269,7 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.AD",
"PerPkg": "1",
@@ -1136,6 +1278,7 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.AK",
"PerPkg": "1",
@@ -1144,6 +1287,7 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.BL",
"PerPkg": "1",
@@ -1152,6 +1296,7 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.IV",
"PerPkg": "1",
@@ -1160,6 +1305,7 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.AD",
"PerPkg": "1",
@@ -1168,6 +1314,7 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.AK",
"PerPkg": "1",
@@ -1176,6 +1323,7 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.BL",
"PerPkg": "1",
@@ -1184,6 +1332,7 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.IV",
"PerPkg": "1",
@@ -1192,6 +1341,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_H_EGRESS_HORZ_INSERTS.AD",
"PerPkg": "1",
@@ -1200,6 +1350,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_H_EGRESS_HORZ_INSERTS.AK",
"PerPkg": "1",
@@ -1208,6 +1359,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_H_EGRESS_HORZ_INSERTS.BL",
"PerPkg": "1",
@@ -1216,6 +1368,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_H_EGRESS_HORZ_INSERTS.IV",
"PerPkg": "1",
@@ -1224,6 +1377,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_H_EGRESS_HORZ_NACK.AD",
"PerPkg": "1",
@@ -1232,6 +1386,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_H_EGRESS_HORZ_NACK.AK",
"PerPkg": "1",
@@ -1240,6 +1395,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_H_EGRESS_HORZ_NACK.BL",
"PerPkg": "1",
@@ -1248,6 +1404,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_H_EGRESS_HORZ_NACK.IV",
"PerPkg": "1",
@@ -1256,6 +1413,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy AD",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.AD",
"PerPkg": "1",
@@ -1264,6 +1422,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy AK",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.AK",
"PerPkg": "1",
@@ -1272,6 +1431,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy BL",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.BL",
"PerPkg": "1",
@@ -1280,6 +1440,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.IV",
"PerPkg": "1",
@@ -1288,6 +1449,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_H_EGRESS_HORZ_STARVED.AD",
"PerPkg": "1",
@@ -1296,6 +1458,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_H_EGRESS_HORZ_STARVED.AK",
"PerPkg": "1",
@@ -1304,6 +1467,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_H_EGRESS_HORZ_STARVED.BL",
"PerPkg": "1",
@@ -1312,6 +1476,7 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_H_EGRESS_HORZ_STARVED.IV",
"PerPkg": "1",
@@ -1320,6 +1485,7 @@
},
{
"BriefDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_H_EGRESS_ORDERING.IV_SNP_GO_DN",
"PerPkg": "1",
@@ -1328,6 +1494,7 @@
},
{
"BriefDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_H_EGRESS_ORDERING.IV_SNP_GO_UP",
"PerPkg": "1",
@@ -1336,6 +1503,7 @@
},
{
"BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_H_EGRESS_VERT_ADS_USED.AD_AG0",
"PerPkg": "1",
@@ -1344,6 +1512,7 @@
},
{
"BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_H_EGRESS_VERT_ADS_USED.AD_AG1",
"PerPkg": "1",
@@ -1352,6 +1521,7 @@
},
{
"BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_H_EGRESS_VERT_ADS_USED.AK_AG0",
"PerPkg": "1",
@@ -1360,6 +1530,7 @@
},
{
"BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_H_EGRESS_VERT_ADS_USED.AK_AG1",
"PerPkg": "1",
@@ -1368,6 +1539,7 @@
},
{
"BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_H_EGRESS_VERT_ADS_USED.BL_AG0",
"PerPkg": "1",
@@ -1376,6 +1548,7 @@
},
{
"BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_H_EGRESS_VERT_ADS_USED.BL_AG1",
"PerPkg": "1",
@@ -1384,6 +1557,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Bypass. AD ring agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_H_EGRESS_VERT_BYPASS.AD_AG0",
"PerPkg": "1",
@@ -1392,6 +1566,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Bypass. AD ring agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_H_EGRESS_VERT_BYPASS.AD_AG1",
"PerPkg": "1",
@@ -1400,6 +1575,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Bypass. AK ring agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_H_EGRESS_VERT_BYPASS.AK_AG0",
"PerPkg": "1",
@@ -1408,6 +1584,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Bypass. AK ring agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_H_EGRESS_VERT_BYPASS.AK_AG1",
"PerPkg": "1",
@@ -1416,6 +1593,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Bypass. BL ring agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_H_EGRESS_VERT_BYPASS.BL_AG0",
"PerPkg": "1",
@@ -1424,6 +1602,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Bypass. BL ring agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_H_EGRESS_VERT_BYPASS.BL_AG1",
"PerPkg": "1",
@@ -1432,6 +1611,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Bypass. IV ring agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_H_EGRESS_VERT_BYPASS.IV",
"PerPkg": "1",
@@ -1440,6 +1620,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AD_AG0",
"PerPkg": "1",
@@ -1448,6 +1629,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AD_AG1",
"PerPkg": "1",
@@ -1456,6 +1638,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AK_AG0",
"PerPkg": "1",
@@ -1464,6 +1647,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AK_AG1",
"PerPkg": "1",
@@ -1472,6 +1656,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.BL_AG0",
"PerPkg": "1",
@@ -1480,6 +1665,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.BL_AG1",
"PerPkg": "1",
@@ -1488,6 +1674,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.IV_AG0",
"PerPkg": "1",
@@ -1496,6 +1683,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AD_AG0",
"PerPkg": "1",
@@ -1504,6 +1692,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AD_AG1",
"PerPkg": "1",
@@ -1512,6 +1701,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AK_AG0",
"PerPkg": "1",
@@ -1520,6 +1710,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AK_AG1",
"PerPkg": "1",
@@ -1528,6 +1719,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.BL_AG0",
"PerPkg": "1",
@@ -1536,6 +1728,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.BL_AG1",
"PerPkg": "1",
@@ -1544,6 +1737,7 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.IV_AG0",
"PerPkg": "1",
@@ -1552,6 +1746,7 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_H_EGRESS_VERT_INSERTS.AD_AG0",
"PerPkg": "1",
@@ -1560,6 +1755,7 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_H_EGRESS_VERT_INSERTS.AD_AG1",
"PerPkg": "1",
@@ -1568,6 +1764,7 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_H_EGRESS_VERT_INSERTS.AK_AG0",
"PerPkg": "1",
@@ -1576,6 +1773,7 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_H_EGRESS_VERT_INSERTS.AK_AG1",
"PerPkg": "1",
@@ -1584,6 +1782,7 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_H_EGRESS_VERT_INSERTS.BL_AG0",
"PerPkg": "1",
@@ -1592,6 +1791,7 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_H_EGRESS_VERT_INSERTS.BL_AG1",
"PerPkg": "1",
@@ -1600,6 +1800,7 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_H_EGRESS_VERT_INSERTS.IV_AG0",
"PerPkg": "1",
@@ -1608,6 +1809,7 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_H_EGRESS_VERT_NACK.AD_AG0",
"PerPkg": "1",
@@ -1616,6 +1818,7 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_H_EGRESS_VERT_NACK.AD_AG1",
"PerPkg": "1",
@@ -1624,6 +1827,7 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_H_EGRESS_VERT_NACK.AK_AG0",
"PerPkg": "1",
@@ -1632,6 +1836,7 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_H_EGRESS_VERT_NACK.AK_AG1",
"PerPkg": "1",
@@ -1640,6 +1845,7 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_H_EGRESS_VERT_NACK.BL_AG0",
"PerPkg": "1",
@@ -1648,6 +1854,7 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_H_EGRESS_VERT_NACK.BL_AG1",
"PerPkg": "1",
@@ -1656,6 +1863,7 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_H_EGRESS_VERT_NACK.IV_AG0",
"PerPkg": "1",
@@ -1664,6 +1872,7 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AD_AG0",
"PerPkg": "1",
@@ -1672,6 +1881,7 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AD_AG1",
"PerPkg": "1",
@@ -1680,6 +1890,7 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AK_AG0",
"PerPkg": "1",
@@ -1688,6 +1899,7 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AK_AG1",
"PerPkg": "1",
@@ -1696,6 +1908,7 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.BL_AG0",
"PerPkg": "1",
@@ -1704,6 +1917,7 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.BL_AG1",
"PerPkg": "1",
@@ -1712,6 +1926,7 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.IV_AG0",
"PerPkg": "1",
@@ -1720,6 +1935,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_H_EGRESS_VERT_STARVED.AD_AG0",
"PerPkg": "1",
@@ -1728,6 +1944,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_H_EGRESS_VERT_STARVED.AD_AG1",
"PerPkg": "1",
@@ -1736,6 +1953,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation Onto AK Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_H_EGRESS_VERT_STARVED.AK_AG0",
"PerPkg": "1",
@@ -1744,6 +1962,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_H_EGRESS_VERT_STARVED.AK_AG1",
"PerPkg": "1",
@@ -1752,6 +1971,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation Onto BL Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_H_EGRESS_VERT_STARVED.BL_AG0",
"PerPkg": "1",
@@ -1760,6 +1980,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_H_EGRESS_VERT_STARVED.BL_AG1",
"PerPkg": "1",
@@ -1768,6 +1989,7 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_H_EGRESS_VERT_STARVED.IV_AG0",
"PerPkg": "1",
@@ -1776,6 +1998,7 @@
},
{
"BriefDescription": "Counts cycles source throttling is asserted - horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_H_FAST_ASSERTED.HORZ",
"PerPkg": "1",
@@ -1784,6 +2007,7 @@
},
{
"BriefDescription": "Counts cycles source throttling is asserted - vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_H_FAST_ASSERTED.VERT",
"PerPkg": "1",
@@ -1791,6 +2015,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_EVEN",
"PerPkg": "1",
@@ -1799,6 +2024,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_ODD",
"PerPkg": "1",
@@ -1807,6 +2033,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
"PerPkg": "1",
@@ -1815,6 +2042,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_ODD",
"PerPkg": "1",
@@ -1823,6 +2051,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_EVEN",
"PerPkg": "1",
@@ -1831,6 +2060,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_ODD",
"PerPkg": "1",
@@ -1839,6 +2069,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
"PerPkg": "1",
@@ -1847,6 +2078,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_ODD",
"PerPkg": "1",
@@ -1855,6 +2087,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_EVEN",
"PerPkg": "1",
@@ -1863,6 +2096,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_ODD",
"PerPkg": "1",
@@ -1871,6 +2105,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
"PerPkg": "1",
@@ -1879,6 +2114,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_ODD",
"PerPkg": "1",
@@ -1887,6 +2123,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop - Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_H_HORZ_RING_IV_IN_USE.LEFT",
"PerPkg": "1",
@@ -1895,6 +2132,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop - Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_H_HORZ_RING_IV_IN_USE.RIGHT",
"PerPkg": "1",
@@ -1903,6 +2141,7 @@
},
{
"BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_INGRESS_INSERTS.IPQ",
"PerPkg": "1",
@@ -1911,6 +2150,7 @@
},
{
"BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_INGRESS_INSERTS.IRQ",
"PerPkg": "1",
@@ -1919,6 +2159,7 @@
},
{
"BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_INGRESS_INSERTS.IRQ_REJ",
"PerPkg": "1",
@@ -1927,6 +2168,7 @@
},
{
"BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_INGRESS_INSERTS.PRQ",
"PerPkg": "1",
@@ -1935,6 +2177,7 @@
},
{
"BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - PRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_H_INGRESS_INSERTS.PRQ_REJ",
"PerPkg": "1",
@@ -1943,6 +2186,7 @@
},
{
"BriefDescription": "Cycles with the IPQ in Internal Starvation.",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_INGRESS_INT_STARVED.IPQ",
"PerPkg": "1",
@@ -1951,6 +2195,7 @@
},
{
"BriefDescription": "Cycles with the IRQ in Internal Starvation.",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_INGRESS_INT_STARVED.IRQ",
"PerPkg": "1",
@@ -1959,6 +2204,7 @@
},
{
"BriefDescription": "Cycles with the ISMQ in Internal Starvation.",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_INGRESS_INT_STARVED.ISMQ",
"PerPkg": "1",
@@ -1967,6 +2213,7 @@
},
{
"BriefDescription": "Ingress internal starvation cycles. Counts cycles in internal starvation. This occurs when one or more of the entries in the ingress queue are being starved out by other entries in the queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_H_INGRESS_INT_STARVED.PRQ",
"PerPkg": "1",
@@ -1975,6 +2222,7 @@
},
{
"BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_H_INGRESS_OCCUPANCY.IPQ",
"PerPkg": "1",
@@ -1983,6 +2231,7 @@
},
{
"BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_H_INGRESS_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -1991,6 +2240,7 @@
},
{
"BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IRQ Rejected",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_H_INGRESS_OCCUPANCY.IRQ_REJ",
"PerPkg": "1",
@@ -1999,6 +2249,7 @@
},
{
"BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - PRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_H_INGRESS_OCCUPANCY.PRQ",
"PerPkg": "1",
@@ -2007,6 +2258,7 @@
},
{
"BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - PRQ Rejected",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_H_INGRESS_OCCUPANCY.PRQ_REJ",
"PerPkg": "1",
@@ -2015,6 +2267,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AD_REQ_VN0",
"PerPkg": "1",
@@ -2023,6 +2276,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AD_RSP_VN0",
"PerPkg": "1",
@@ -2031,6 +2285,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AK_NON_UPI",
"PerPkg": "1",
@@ -2039,6 +2294,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_NCB_VN0",
"PerPkg": "1",
@@ -2047,6 +2303,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_NCS_VN0",
"PerPkg": "1",
@@ -2055,6 +2312,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_RSP_VN0",
"PerPkg": "1",
@@ -2063,6 +2321,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_WB_VN0",
"PerPkg": "1",
@@ -2071,6 +2330,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.IV_NON_UPI",
"PerPkg": "1",
@@ -2079,6 +2339,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.ALLOW_SNP",
"PerPkg": "1",
@@ -2087,6 +2348,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.ANY_REJECT_IPQ0",
"PerPkg": "1",
@@ -2095,6 +2357,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.PA_MATCH",
"PerPkg": "1",
@@ -2103,6 +2366,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.SF_VICTIM",
"PerPkg": "1",
@@ -2111,6 +2375,7 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.SF_WAY",
"PerPkg": "1",
@@ -2119,6 +2384,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AD_REQ_VN0",
"PerPkg": "1",
@@ -2127,6 +2393,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AD_RSP_VN0",
"PerPkg": "1",
@@ -2135,6 +2402,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AK_NON_UPI",
"PerPkg": "1",
@@ -2143,6 +2411,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_NCB_VN0",
"PerPkg": "1",
@@ -2151,6 +2420,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_NCS_VN0",
"PerPkg": "1",
@@ -2159,6 +2429,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_RSP_VN0",
"PerPkg": "1",
@@ -2167,6 +2438,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_WB_VN0",
"PerPkg": "1",
@@ -2175,6 +2447,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.IV_NON_UPI",
"PerPkg": "1",
@@ -2183,6 +2456,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.ALLOW_SNP",
"PerPkg": "1",
@@ -2191,6 +2465,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.ANY_REJECT_IRQ0",
"PerPkg": "1",
@@ -2199,6 +2474,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.PA_MATCH",
"PerPkg": "1",
@@ -2207,6 +2483,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.SF_VICTIM",
"PerPkg": "1",
@@ -2215,6 +2492,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.SF_WAY",
"PerPkg": "1",
@@ -2223,6 +2501,7 @@
},
{
"BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AD_REQ_VN0",
"PerPkg": "1",
@@ -2231,6 +2510,7 @@
},
{
"BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AD_RSP_VN0",
"PerPkg": "1",
@@ -2239,6 +2519,7 @@
},
{
"BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AK_NON_UPI",
"PerPkg": "1",
@@ -2247,6 +2528,7 @@
},
{
"BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_NCB_VN0",
"PerPkg": "1",
@@ -2255,6 +2537,7 @@
},
{
"BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_NCS_VN0",
"PerPkg": "1",
@@ -2263,6 +2546,7 @@
},
{
"BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_RSP_VN0",
"PerPkg": "1",
@@ -2271,6 +2555,7 @@
},
{
"BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_WB_VN0",
"PerPkg": "1",
@@ -2279,6 +2564,7 @@
},
{
"BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.IV_NON_UPI",
"PerPkg": "1",
@@ -2287,6 +2573,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AD_REQ_VN0",
"PerPkg": "1",
@@ -2295,6 +2582,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AD_RSP_VN0",
"PerPkg": "1",
@@ -2303,6 +2591,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AK_NON_UPI",
"PerPkg": "1",
@@ -2311,6 +2600,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_NCB_VN0",
"PerPkg": "1",
@@ -2319,6 +2609,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_NCS_VN0",
"PerPkg": "1",
@@ -2327,6 +2618,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_RSP_VN0",
"PerPkg": "1",
@@ -2335,6 +2627,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_WB_VN0",
"PerPkg": "1",
@@ -2343,6 +2636,7 @@
},
{
"BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.IV_NON_UPI",
"PerPkg": "1",
@@ -2351,6 +2645,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AD_REQ_VN0",
"PerPkg": "1",
@@ -2359,6 +2654,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AD_RSP_VN0",
"PerPkg": "1",
@@ -2367,6 +2663,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AK_NON_UPI",
"PerPkg": "1",
@@ -2375,6 +2672,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_NCB_VN0",
"PerPkg": "1",
@@ -2383,6 +2681,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_NCS_VN0",
"PerPkg": "1",
@@ -2391,6 +2690,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_RSP_VN0",
"PerPkg": "1",
@@ -2399,6 +2699,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_WB_VN0",
"PerPkg": "1",
@@ -2407,6 +2708,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.IV_NON_UPI",
"PerPkg": "1",
@@ -2415,6 +2717,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.ALLOW_SNP",
"PerPkg": "1",
@@ -2423,6 +2726,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.ANY_REJECT_IRQ0",
"PerPkg": "1",
@@ -2431,6 +2735,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.PA_MATCH",
"PerPkg": "1",
@@ -2439,6 +2744,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.SF_VICTIM",
"PerPkg": "1",
@@ -2447,6 +2753,7 @@
},
{
"BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.SF_WAY",
"PerPkg": "1",
@@ -2455,6 +2762,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AD_REQ_VN0",
"PerPkg": "1",
@@ -2463,6 +2771,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AD_RSP_VN0",
"PerPkg": "1",
@@ -2471,6 +2780,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AK_NON_UPI",
"PerPkg": "1",
@@ -2479,6 +2789,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_NCB_VN0",
"PerPkg": "1",
@@ -2487,6 +2798,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_NCS_VN0",
"PerPkg": "1",
@@ -2495,6 +2807,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_RSP_VN0",
"PerPkg": "1",
@@ -2503,6 +2816,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_WB_VN0",
"PerPkg": "1",
@@ -2511,6 +2825,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.IV_NON_UPI",
"PerPkg": "1",
@@ -2519,6 +2834,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.ALLOW_SNP",
"PerPkg": "1",
@@ -2527,6 +2843,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.ANY_REJECT_IRQ0",
"PerPkg": "1",
@@ -2535,6 +2852,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.PA_MATCH",
"PerPkg": "1",
@@ -2543,6 +2861,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.SF_VICTIM",
"PerPkg": "1",
@@ -2551,6 +2870,7 @@
},
{
"BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.SF_WAY",
"PerPkg": "1",
@@ -2559,6 +2879,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AD_REQ_VN0",
"PerPkg": "1",
@@ -2567,6 +2888,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AD_RSP_VN0",
"PerPkg": "1",
@@ -2575,6 +2897,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AK_NON_UPI",
"PerPkg": "1",
@@ -2583,6 +2906,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_NCB_VN0",
"PerPkg": "1",
@@ -2591,6 +2915,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_NCS_VN0",
"PerPkg": "1",
@@ -2599,6 +2924,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_RSP_VN0",
"PerPkg": "1",
@@ -2607,6 +2933,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_WB_VN0",
"PerPkg": "1",
@@ -2615,6 +2942,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.IV_NON_UPI",
"PerPkg": "1",
@@ -2623,6 +2951,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.ALLOW_SNP",
"PerPkg": "1",
@@ -2631,6 +2960,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.ANY_REJECT_IRQ0",
"PerPkg": "1",
@@ -2639,6 +2969,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.PA_MATCH",
"PerPkg": "1",
@@ -2647,6 +2978,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.SF_VICTIM",
"PerPkg": "1",
@@ -2655,6 +2987,7 @@
},
{
"BriefDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.SF_WAY",
"PerPkg": "1",
@@ -2663,6 +2996,7 @@
},
{
"BriefDescription": "Miscellaneous events in the Cbo. CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.CV0_PREF_MISS",
"PerPkg": "1",
@@ -2671,6 +3005,7 @@
},
{
"BriefDescription": "Miscellaneous events in the Cbo. CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.CV0_PREF_VIC",
"PerPkg": "1",
@@ -2679,6 +3014,7 @@
},
{
"BriefDescription": "Miscellaneous events in the Cbo. RFO HitS",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -2687,6 +3023,7 @@
},
{
"BriefDescription": "Miscellaneous events in the Cbo. Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.RSPI_WAS_FSE",
"PerPkg": "1",
@@ -2695,6 +3032,7 @@
},
{
"BriefDescription": "Miscellaneous events in the Cbo. Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.WC_ALIASING",
"PerPkg": "1",
@@ -2703,6 +3041,7 @@
},
{
"BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.AD",
"PerPkg": "1",
@@ -2711,6 +3050,7 @@
},
{
"BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.AK",
"PerPkg": "1",
@@ -2719,6 +3059,7 @@
},
{
"BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Data Responses to core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.BL",
"PerPkg": "1",
@@ -2727,6 +3068,7 @@
},
{
"BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.IV",
"PerPkg": "1",
@@ -2735,6 +3077,7 @@
},
{
"BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.AD",
"PerPkg": "1",
@@ -2743,6 +3086,7 @@
},
{
"BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.AK",
"PerPkg": "1",
@@ -2751,6 +3095,7 @@
},
{
"BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Data Responses to core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.BL",
"PerPkg": "1",
@@ -2759,6 +3104,7 @@
},
{
"BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.IV",
"PerPkg": "1",
@@ -2767,6 +3113,7 @@
},
{
"BriefDescription": "Horizontal ring sink starvation count - AD ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.AD",
"PerPkg": "1",
@@ -2775,6 +3122,7 @@
},
{
"BriefDescription": "Horizontal ring sink starvation count - AK ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK",
"PerPkg": "1",
@@ -2783,6 +3131,7 @@
},
{
"BriefDescription": "Horizontal ring sink starvation count - BL ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.BL",
"PerPkg": "1",
@@ -2791,6 +3140,7 @@
},
{
"BriefDescription": "Horizontal ring sink starvation count - IV ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.IV",
"PerPkg": "1",
@@ -2799,6 +3149,7 @@
},
{
"BriefDescription": "Vertical ring sink starvation count - AD ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.AD",
"PerPkg": "1",
@@ -2807,6 +3158,7 @@
},
{
"BriefDescription": "Vertical ring sink starvation count - AK ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.AK",
"PerPkg": "1",
@@ -2815,6 +3167,7 @@
},
{
"BriefDescription": "Vertical ring sink starvation count - BL ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.BL",
"PerPkg": "1",
@@ -2823,6 +3176,7 @@
},
{
"BriefDescription": "Vertical ring sink starvation count - IV ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.IV",
"PerPkg": "1",
@@ -2831,6 +3185,7 @@
},
{
"BriefDescription": "Counts cycles in throttle mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_H_RING_SRC_THRTL",
"PerPkg": "1",
@@ -2838,6 +3193,7 @@
},
{
"BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_SF_LOOKUP.ANY",
"PerPkg": "1",
@@ -2846,6 +3202,7 @@
},
{
"BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Read transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_SF_LOOKUP.DATA_READ",
"PerPkg": "1",
@@ -2854,6 +3211,7 @@
},
{
"BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_SF_LOOKUP.REMOTE_SNOOP",
"PerPkg": "1",
@@ -2862,6 +3220,7 @@
},
{
"BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_H_SF_LOOKUP.WRITE",
"PerPkg": "1",
@@ -2870,6 +3229,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.AD_BNC",
"PerPkg": "1",
@@ -2878,6 +3238,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.AD_CRD",
"PerPkg": "1",
@@ -2886,6 +3247,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.BL_BNC",
"PerPkg": "1",
@@ -2894,6 +3256,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.BL_CRD",
"PerPkg": "1",
@@ -2902,6 +3265,7 @@
},
{
"BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_H_TG_INGRESS_BYPASS.AD_BNC",
"PerPkg": "1",
@@ -2910,6 +3274,7 @@
},
{
"BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_H_TG_INGRESS_BYPASS.AD_CRD",
"PerPkg": "1",
@@ -2918,6 +3283,7 @@
},
{
"BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_H_TG_INGRESS_BYPASS.AK_BNC",
"PerPkg": "1",
@@ -2926,6 +3292,7 @@
},
{
"BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_H_TG_INGRESS_BYPASS.BL_BNC",
"PerPkg": "1",
@@ -2934,6 +3301,7 @@
},
{
"BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_H_TG_INGRESS_BYPASS.BL_CRD",
"PerPkg": "1",
@@ -2942,6 +3310,7 @@
},
{
"BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_H_TG_INGRESS_BYPASS.IV_BNC",
"PerPkg": "1",
@@ -2950,6 +3319,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AD_BNC",
"PerPkg": "1",
@@ -2958,6 +3328,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AD_CRD",
"PerPkg": "1",
@@ -2966,6 +3337,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AK_BNC",
"PerPkg": "1",
@@ -2974,6 +3346,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_H_TG_INGRESS_CRD_STARVED.BL_BNC",
"PerPkg": "1",
@@ -2982,6 +3355,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_H_TG_INGRESS_CRD_STARVED.BL_CRD",
"PerPkg": "1",
@@ -2990,6 +3364,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_H_TG_INGRESS_CRD_STARVED.IFV",
"PerPkg": "1",
@@ -2998,6 +3373,7 @@
},
{
"BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_H_TG_INGRESS_CRD_STARVED.IV_BNC",
"PerPkg": "1",
@@ -3006,6 +3382,7 @@
},
{
"BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_H_TG_INGRESS_INSERTS.AD_BNC",
"PerPkg": "1",
@@ -3014,6 +3391,7 @@
},
{
"BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_H_TG_INGRESS_INSERTS.AD_CRD",
"PerPkg": "1",
@@ -3022,6 +3400,7 @@
},
{
"BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_H_TG_INGRESS_INSERTS.AK_BNC",
"PerPkg": "1",
@@ -3030,6 +3409,7 @@
},
{
"BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_H_TG_INGRESS_INSERTS.BL_BNC",
"PerPkg": "1",
@@ -3038,6 +3418,7 @@
},
{
"BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_H_TG_INGRESS_INSERTS.BL_CRD",
"PerPkg": "1",
@@ -3046,6 +3427,7 @@
},
{
"BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_H_TG_INGRESS_INSERTS.IV_BNC",
"PerPkg": "1",
@@ -3054,6 +3436,7 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AD_BNC",
"PerPkg": "1",
@@ -3062,6 +3445,7 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AD_CRD",
"PerPkg": "1",
@@ -3070,6 +3454,7 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AK_BNC",
"PerPkg": "1",
@@ -3078,6 +3463,7 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_H_TG_INGRESS_OCCUPANCY.BL_BNC",
"PerPkg": "1",
@@ -3086,6 +3472,7 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_H_TG_INGRESS_OCCUPANCY.BL_CRD",
"PerPkg": "1",
@@ -3094,6 +3481,7 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_H_TG_INGRESS_OCCUPANCY.IV_BNC",
"PerPkg": "1",
@@ -3102,6 +3490,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -SF/LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TOR_INSERTS.EVICT",
"PerPkg": "1",
@@ -3110,6 +3499,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -Hit (Not a Miss)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TOR_INSERTS.HIT",
"PerPkg": "1",
@@ -3118,6 +3508,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TOR_INSERTS.IPQ",
"PerPkg": "1",
@@ -3126,6 +3517,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TOR_INSERTS.IRQ",
"PerPkg": "1",
@@ -3134,6 +3526,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TOR_INSERTS.MISS",
"PerPkg": "1",
@@ -3142,6 +3535,7 @@
},
{
"BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_H_TOR_INSERTS.PRQ",
"PerPkg": "1",
@@ -3150,6 +3544,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -SF/LLC Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.EVICT",
"PerPkg": "1",
@@ -3158,6 +3553,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -Hit (Not a Miss)",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.HIT",
"PerPkg": "1",
@@ -3166,6 +3562,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.IPQ",
"PerPkg": "1",
@@ -3174,6 +3571,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ hit",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.IPQ_HIT",
"PerPkg": "1",
@@ -3182,6 +3580,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.IPQ_MISS",
"PerPkg": "1",
@@ -3190,6 +3589,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -3198,6 +3598,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ hit",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.IRQ_HIT",
"PerPkg": "1",
@@ -3206,6 +3607,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.IRQ_MISS",
"PerPkg": "1",
@@ -3214,6 +3616,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -Miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.MISS",
"PerPkg": "1",
@@ -3222,6 +3625,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.PRQ",
"PerPkg": "1",
@@ -3230,6 +3634,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ hit",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.PRQ_HIT",
"PerPkg": "1",
@@ -3238,6 +3643,7 @@
},
{
"BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_H_TOR_OCCUPANCY.PRQ_MISS",
"PerPkg": "1",
@@ -3246,12 +3652,14 @@
},
{
"BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_H_U_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_EVEN",
"PerPkg": "1",
@@ -3260,6 +3668,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_ODD",
"PerPkg": "1",
@@ -3268,6 +3677,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_EVEN",
"PerPkg": "1",
@@ -3276,6 +3686,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_ODD",
"PerPkg": "1",
@@ -3284,6 +3695,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_EVEN",
"PerPkg": "1",
@@ -3292,6 +3704,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_ODD",
"PerPkg": "1",
@@ -3300,6 +3713,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_EVEN",
"PerPkg": "1",
@@ -3308,6 +3722,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_ODD",
"PerPkg": "1",
@@ -3316,6 +3731,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_EVEN",
"PerPkg": "1",
@@ -3324,6 +3740,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_ODD",
"PerPkg": "1",
@@ -3332,6 +3749,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_EVEN",
"PerPkg": "1",
@@ -3340,6 +3758,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_ODD",
"PerPkg": "1",
@@ -3348,6 +3767,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop - Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_H_VERT_RING_IV_IN_USE.DN",
"PerPkg": "1",
@@ -3356,6 +3776,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop - Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_H_VERT_RING_IV_IN_USE.UP",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json
index 898f7e425cd4..7df7650e1a57 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AD_0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AD_0",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AD_1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AD_1",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AK_0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AK_0",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AK_1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AK_1",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. BL_0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_EGRESS_CYCLES_FULL.BL_0",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. BL_1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_EGRESS_CYCLES_FULL.BL_1",
"PerPkg": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AD_0",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_EGRESS_CYCLES_NE.AD_0",
"PerPkg": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AD_1",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_EGRESS_CYCLES_NE.AD_1",
"PerPkg": "1",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AK_0",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_EGRESS_CYCLES_NE.AK_0",
"PerPkg": "1",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AK_1",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_EGRESS_CYCLES_NE.AK_1",
"PerPkg": "1",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. BL_0",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_EGRESS_CYCLES_NE.BL_0",
"PerPkg": "1",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. BL_1",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_EGRESS_CYCLES_NE.BL_1",
"PerPkg": "1",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AD_0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_EGRESS_INSERTS.AD_0",
"PerPkg": "1",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AD_1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_EGRESS_INSERTS.AD_1",
"PerPkg": "1",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_EGRESS_INSERTS.AK_0",
"PerPkg": "1",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_EGRESS_INSERTS.AK_1",
"PerPkg": "1",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_CRD_0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_EGRESS_INSERTS.AK_CRD_0",
"PerPkg": "1",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_CRD_1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_EGRESS_INSERTS.AK_CRD_1",
"PerPkg": "1",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. BL_0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_EGRESS_INSERTS.BL_0",
"PerPkg": "1",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. BL_1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_EGRESS_INSERTS.BL_1",
"PerPkg": "1",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_INGRESS_CYCLES_NE.ALL",
"PerPkg": "1",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_IDI",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_IDI",
"PerPkg": "1",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_NCB",
"PerPkg": "1",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_NCS",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
index fb752974179b..f137dfde8481 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of read requests and streaming stores that hit in MCDRAM cache and the data in MCDRAM is clean with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_E_EDC_ACCESS.HIT_CLEAN",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of read requests and streaming stores that hit in MCDRAM cache and the data in MCDRAM is dirty with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_E_EDC_ACCESS.HIT_DIRTY",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of read requests and streaming stores that miss in MCDRAM cache and the data evicted from the MCDRAM is clean with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_E_EDC_ACCESS.MISS_CLEAN",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts the number of read requests and streaming stores that miss in MCDRAM cache and the data evicted from the MCDRAM is dirty with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_E_EDC_ACCESS.MISS_DIRTY",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of EDC Hits or Misses. Miss I",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_E_EDC_ACCESS.MISS_INVALID",
"PerPkg": "1",
@@ -41,12 +46,14 @@
},
{
"BriefDescription": "ECLK count",
+ "Counter": "0,1,2,3",
"EventName": "UNC_E_E_CLOCKTICKS",
"PerPkg": "1",
"Unit": "EDC_ECLK"
},
{
"BriefDescription": "Counts the number of read requests received by the MCDRAM controller. This event is valid in all three memory modes: flat, cache and hybrid. In cache and hybrid memory mode, this event counts all read requests as well as streaming stores that hit or miss in the MCDRAM cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_E_RPQ_INSERTS",
"PerPkg": "1",
@@ -55,12 +62,14 @@
},
{
"BriefDescription": "UCLK count",
+ "Counter": "0,1,2,3",
"EventName": "UNC_E_U_CLOCKTICKS",
"PerPkg": "1",
"Unit": "EDC_UCLK"
},
{
"BriefDescription": "Counts the number of write requests received by the MCDRAM controller. This event is valid in all three memory modes: flat, cache and hybrid. In cache and hybrid memory mode, this event counts all streaming stores, writebacks and, read requests that miss in MCDRAM cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_E_WPQ_INSERTS",
"PerPkg": "1",
@@ -69,6 +78,7 @@
},
{
"BriefDescription": "CAS All",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "CAS Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -85,6 +96,7 @@
},
{
"BriefDescription": "CAS Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -93,12 +105,14 @@
},
{
"BriefDescription": "DCLK count",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_D_CLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC_DCLK"
},
{
"BriefDescription": "UCLK count",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_U_CLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC_UCLK"
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
index 9be30a33b43b..cf3c5f4f4fb7 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of load micro-ops retired that cause a DTLB miss (Precise Event)",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Counts the total number of core cycles for all the page walks. The cycles for page walks started in speculative path will also be included.",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.CYCLES",
"PublicDescription": "This event counts every cycle when a data (D) page walk or instruction (I) page walk is in progress.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Counts the total number of core cycles for all the D-side page walks. The cycles for page walks started in speculative path will also be included.",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.D_SIDE_CYCLES",
"SampleAfterValue": "200003",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts the total D-side page walks that are completed or started. The page walks started in the speculative path will also be counted",
+ "Counter": "0,1",
"EdgeDetect": "1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.D_SIDE_WALKS",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts the total number of core cycles for all the I-side page walks. The cycles for page walks started in speculative path will also be included.",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.I_SIDE_CYCLES",
"PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts the total I-side page walks that are completed.",
+ "Counter": "0,1",
"EdgeDetect": "1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.I_SIDE_WALKS",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts the total page walks that are completed (I-side and D-side)",
+ "Counter": "0,1",
"EdgeDetect": "1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.WALKS",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
index fb48be357c4e..759714618e08 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of L2 Cache Accesses Counts the total number of L2 Cache Accesses - sum of hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only, per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PublicDescription": "Counts the number of L2 Cache Accesses Counts the total number of L2 Cache Accesses - sum of hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only.",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -57,6 +63,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -68,6 +75,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -78,6 +86,7 @@
},
{
"BriefDescription": "Counts the number of store uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -88,6 +97,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
@@ -100,6 +110,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
@@ -112,6 +123,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
@@ -124,6 +136,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
@@ -136,6 +149,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
@@ -148,6 +162,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
@@ -160,6 +175,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
@@ -172,6 +188,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
@@ -184,6 +201,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
@@ -196,6 +214,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
@@ -208,6 +227,7 @@
},
{
"BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
index 3a24934e8d6e..0327bece0f94 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"SampleAfterValue": "200003",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"SampleAfterValue": "200003",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
"PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/memory.json b/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
index 9c188d80b7b9..3d12e226d5ef 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
@@ -14,6 +15,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -27,6 +29,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -40,6 +43,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048",
@@ -53,6 +57,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -66,6 +71,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -79,6 +85,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -92,6 +99,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -105,6 +113,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -118,6 +127,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -131,6 +141,7 @@
},
{
"BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
@@ -142,6 +153,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -152,6 +164,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -162,6 +175,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership, including SWPREFETCHW which is an RFO were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -172,6 +186,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/other.json b/tools/perf/pmu-events/arch/x86/lunarlake/other.json
index 377f717db6cc..0b49b4684c4b 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts cacheable demand data reads Catch all value for any response types - this includes response types not define in the OCR. If this is set all other response types will be ignored",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -21,6 +23,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -31,6 +34,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -41,6 +45,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership, including SWPREFETCHW which is an RFO Catch all value for any response types - this includes response types not define in the OCR. If this is set all other response types will be ignored",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -51,6 +56,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
index 2c9f85ec8c4a..220c2115fec9 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
"UMask": "0x2",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Core cycles when the core is not in a halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"SampleAfterValue": "2000003",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time. [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"UMask": "0x3",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -82,6 +92,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
@@ -100,6 +112,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"UMask": "0x2",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in a halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003",
@@ -122,6 +137,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time. [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
@@ -130,6 +146,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"SampleAfterValue": "2000003",
@@ -138,6 +155,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "Counts the number of instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -155,6 +174,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address partially overlaps with an older store (size mismatch) - unknown_sta/bad_forward",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "1",
@@ -173,6 +194,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -182,6 +204,7 @@
},
{
"BriefDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PEBS": "1",
@@ -191,6 +214,7 @@
},
{
"BriefDescription": "LBR record is inserted",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PEBS": "1",
@@ -200,6 +224,7 @@
},
{
"BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
@@ -209,6 +234,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -217,6 +243,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method.",
@@ -226,6 +253,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"SampleAfterValue": "1000003",
@@ -233,6 +261,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL_P",
"SampleAfterValue": "1000003",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003",
@@ -248,6 +278,7 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN_BE_BOUND.ALL_P",
"SampleAfterValue": "1000003",
@@ -256,6 +287,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of retirement slots not consumed due to front end stalls",
+ "Counter": "37",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003",
"UMask": "0x6",
@@ -263,6 +295,7 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "TOPDOWN_FE_BOUND.ALL_P",
"SampleAfterValue": "1000003",
@@ -271,6 +304,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of consumed retirement slots.",
+ "Counter": "38",
"EventName": "TOPDOWN_RETIRING.ALL",
"PEBS": "1",
"SampleAfterValue": "1000003",
@@ -279,6 +313,7 @@
},
{
"BriefDescription": "Counts the number of consumed retirement slots.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "TOPDOWN_RETIRING.ALL_P",
"PEBS": "1",
@@ -288,6 +323,7 @@
},
{
"BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric. Software can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json
index bb9458799f1c..59af79e3466e 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index c9891630be10..d503aa7e3594 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -1,38 +1,38 @@
Family-model,Version,Filename,EventType
-GenuineIntel-6-(97|9A|B7|BA|BF),v1.24,alderlake,core
-GenuineIntel-6-BE,v1.24,alderlaken,core
+GenuineIntel-6-(97|9A|B7|BA|BF),v1.27,alderlake,core
+GenuineIntel-6-BE,v1.27,alderlaken,core
GenuineIntel-6-(1C|26|27|35|36),v5,bonnell,core
GenuineIntel-6-(3D|47),v29,broadwell,core
GenuineIntel-6-56,v11,broadwellde,core
GenuineIntel-6-4F,v22,broadwellx,core
-GenuineIntel-6-55-[56789ABCDEF],v1.21,cascadelakex,core
-GenuineIntel-6-9[6C],v1.04,elkhartlake,core
-GenuineIntel-6-CF,v1.06,emeraldrapids,core
+GenuineIntel-6-55-[56789ABCDEF],v1.22,cascadelakex,core
+GenuineIntel-6-9[6C],v1.05,elkhartlake,core
+GenuineIntel-6-CF,v1.09,emeraldrapids,core
GenuineIntel-6-5[CF],v13,goldmont,core
GenuineIntel-6-7A,v1.01,goldmontplus,core
-GenuineIntel-6-B6,v1.02,grandridge,core
-GenuineIntel-6-A[DE],v1.01,graniterapids,core
+GenuineIntel-6-B6,v1.03,grandridge,core
+GenuineIntel-6-A[DE],v1.02,graniterapids,core
GenuineIntel-6-(3C|45|46),v35,haswell,core
GenuineIntel-6-3F,v28,haswellx,core
-GenuineIntel-6-7[DE],v1.21,icelake,core
-GenuineIntel-6-6[AC],v1.24,icelakex,core
+GenuineIntel-6-7[DE],v1.22,icelake,core
+GenuineIntel-6-6[AC],v1.26,icelakex,core
GenuineIntel-6-3A,v24,ivybridge,core
GenuineIntel-6-3E,v24,ivytown,core
GenuineIntel-6-2D,v24,jaketown,core
GenuineIntel-6-(57|85),v16,knightslanding,core
GenuineIntel-6-BD,v1.01,lunarlake,core
-GenuineIntel-6-A[AC],v1.08,meteorlake,core
+GenuineIntel-6-A[AC],v1.10,meteorlake,core
GenuineIntel-6-1[AEF],v4,nehalemep,core
GenuineIntel-6-2E,v4,nehalemex,core
-GenuineIntel-6-A7,v1.02,rocketlake,core
+GenuineIntel-6-A7,v1.03,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-8F,v1.20,sapphirerapids,core
-GenuineIntel-6-AF,v1.02,sierraforest,core
+GenuineIntel-6-8F,v1.23,sapphirerapids,core
+GenuineIntel-6-AF,v1.04,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
-GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v58,skylake,core
-GenuineIntel-6-55-[01234],v1.33,skylakex,core
-GenuineIntel-6-86,v1.22,snowridgex,core
-GenuineIntel-6-8[CD],v1.15,tigerlake,core
+GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v59,skylake,core
+GenuineIntel-6-55-[01234],v1.35,skylakex,core
+GenuineIntel-6-86,v1.23,snowridgex,core
+GenuineIntel-6-8[CD],v1.16,tigerlake,core
GenuineIntel-6-2C,v5,westmereep-dp,core
GenuineIntel-6-25,v4,westmereep-sp,core
GenuineIntel-6-2F,v4,westmereex,core
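
The mapfile.csv rows above tie a CPUID identification string to a per-model event directory: the Family-model column is a regular expression matched against the CPUID string, Filename selects the directory under arch/x86/, and Version tracks the event list revision being bumped here. A minimal lookup sketch, assuming the path below and anchoring rules chosen for illustration (not perf's actual matching code):

    import csv
    import re

    def lookup_event_dir(mapfile_path, cpuid):
        # Return (Filename, Version) for the first core row whose Family-model
        # regular expression matches the CPUID string, e.g. "GenuineIntel-6-BD"
        # (Lunar Lake) resolves to the "lunarlake" directory.
        with open(mapfile_path, newline="") as f:
            for row in csv.DictReader(f):
                if row["EventType"] != "core":
                    continue
                if re.match(row["Family-model"] + "(-.*)?$", cpuid):
                    return row["Filename"], row["Version"]
        return None

    if __name__ == "__main__":
        print(lookup_event_dir("tools/perf/pmu-events/arch/x86/mapfile.csv",
                               "GenuineIntel-6-BD"))
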
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
index af7acb15f661..908e3c7f6d6e 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D.HWPF_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.HWPF_MISS",
"SampleAfterValue": "1000003",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x48",
@@ -38,6 +42,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALLS",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -47,6 +52,7 @@
},
{
"BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -56,6 +62,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -66,6 +73,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -75,6 +83,7 @@
},
{
"BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
@@ -84,6 +93,7 @@
},
{
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.SILENT",
"PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
@@ -92,7 +102,18 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
@@ -102,6 +123,7 @@
},
{
"BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.HIT",
"PublicDescription": "Counts all requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]",
@@ -111,6 +133,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
@@ -120,6 +143,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -129,6 +153,7 @@
},
{
"BriefDescription": "Demand Data Read access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -138,6 +163,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Counts demand requests that miss L2 cache.",
@@ -147,6 +173,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PublicDescription": "Counts demand requests to L2 cache.",
@@ -156,6 +183,7 @@
},
{
"BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_HWPF",
"SampleAfterValue": "200003",
@@ -164,6 +192,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -173,6 +202,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -182,6 +212,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -191,6 +222,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
@@ -200,6 +232,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -209,6 +242,7 @@
},
{
"BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_REQUEST.HIT]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.HIT",
"PublicDescription": "Counts all requests that hit L2 cache. [This event is alias to L2_REQUEST.HIT]",
@@ -218,6 +252,7 @@
},
{
"BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.HWPF_MISS",
"SampleAfterValue": "200003",
@@ -226,6 +261,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_REQUEST.MISS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
@@ -235,6 +271,7 @@
},
{
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
@@ -244,6 +281,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -253,6 +291,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -261,7 +300,28 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "Counts L2 writebacks that access L2 cache.",
@@ -271,6 +331,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
@@ -280,15 +341,17 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x41",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -298,15 +361,17 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x4f",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -316,6 +381,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an instruction cache or TLB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.ALL",
"SampleAfterValue": "1000003",
@@ -324,6 +390,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.L2_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
@@ -333,6 +400,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which hit in the LLC.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT",
"SampleAfterValue": "1000003",
@@ -341,6 +409,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS",
"SampleAfterValue": "1000003",
@@ -349,6 +418,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.ALL",
"SampleAfterValue": "1000003",
@@ -357,6 +427,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.L2_HIT",
"PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 cache.",
@@ -366,6 +437,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT",
"SampleAfterValue": "1000003",
@@ -374,6 +446,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS",
"SampleAfterValue": "1000003",
@@ -382,6 +455,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -393,6 +467,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -404,6 +479,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -415,6 +491,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -426,6 +503,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -437,6 +515,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -448,6 +527,7 @@
},
{
"BriefDescription": "Retired load instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS",
@@ -459,6 +539,7 @@
},
{
"BriefDescription": "Retired store instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_STORES",
@@ -470,6 +551,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -481,6 +563,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -492,6 +575,7 @@
},
{
"BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
"PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
@@ -501,6 +585,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
@@ -511,29 +596,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "Data_LA": "1",
- "EventCode": "0xd2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "SampleAfterValue": "20011",
- "UMask": "0x2",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
- "Data_LA": "1",
- "EventCode": "0xd2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
- "SampleAfterValue": "20011",
- "UMask": "0x4",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -545,6 +609,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -556,6 +621,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
@@ -567,6 +633,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
@@ -578,6 +645,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -589,6 +657,7 @@
},
{
"BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -600,6 +669,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -611,6 +681,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -622,6 +693,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -633,6 +705,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -644,6 +717,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -655,6 +729,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -666,69 +741,70 @@
},
{
"BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LOCAL_DRAM",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of load ops retired that miss in the L1 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x40",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of load ops retired that miss in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x80",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x1c",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x20",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ALL",
"SampleAfterValue": "20003",
@@ -737,6 +813,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
"SampleAfterValue": "20003",
@@ -745,6 +822,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.RSV",
"SampleAfterValue": "20003",
@@ -753,6 +831,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
"SampleAfterValue": "20003",
@@ -761,6 +840,7 @@
},
{
"BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "MEM_STORE_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
@@ -769,196 +849,197 @@
},
{
"BriefDescription": "Counts the number of load ops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x81",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of store ops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x82",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
"MSRIndex": "0x3F6",
"MSRValue": "0x400",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
"MSRIndex": "0x3F6",
"MSRValue": "0x800",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of load uops retired that performed one or more locks",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x21",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x43",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of retired split load uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x41",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of retired split store uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x42",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x6",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Retired memory uops for any access",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe5",
"EventName": "MEM_UOP_RETIRED.ANY",
"PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
@@ -968,6 +1049,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -978,6 +1060,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -988,6 +1071,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -998,6 +1082,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1008,6 +1093,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1018,6 +1104,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1028,6 +1115,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1038,6 +1126,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1048,6 +1137,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1058,6 +1148,7 @@
},
{
"BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
@@ -1067,6 +1158,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -1076,6 +1168,7 @@
},
{
"BriefDescription": "Cacheable and Non-Cacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Counts both cacheable and Non-Cacheable code read requests.",
@@ -1085,6 +1178,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -1094,6 +1188,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -1103,6 +1198,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -1113,6 +1209,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
@@ -1123,6 +1220,7 @@
},
{
"BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -1132,6 +1230,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -1142,6 +1241,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
"SampleAfterValue": "1000003",
@@ -1150,6 +1250,7 @@
},
{
"BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
@@ -1159,6 +1260,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -1167,16 +1269,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "EventCode": "0x20",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
@@ -1186,6 +1280,7 @@
},
{
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "SQ_MISC.BUS_LOCK",
"PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
@@ -1194,7 +1289,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
@@ -1204,6 +1309,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
@@ -1213,6 +1319,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.T0",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
@@ -1222,6 +1329,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
@@ -1231,6 +1339,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json b/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
index 30e604d2120f..28dc5e06ee31 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xcd",
"EventName": "ARITH.FPDIV_ACTIVE",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "This event counts the cycles the floating point divider is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.FPDIV_ACTIVE",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.FP",
"PublicDescription": "Counts all microcode Floating Point assists.",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.SSE_AVX_MIX",
"SampleAfterValue": "1000003",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"SampleAfterValue": "2000003",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"SampleAfterValue": "2000003",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"SampleAfterValue": "2000003",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V0",
"SampleAfterValue": "2000003",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V1",
"SampleAfterValue": "2000003",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V2",
"SampleAfterValue": "2000003",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -93,6 +104,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -102,6 +114,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -111,6 +124,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -120,6 +134,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -129,6 +144,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -138,6 +154,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -147,6 +164,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -156,6 +174,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -165,53 +184,108 @@
},
{
"BriefDescription": "Counts the number of all types of floating point operations per uop with all default weighting",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.ALL",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x3",
"Unit": "cpu_atom"
},
{
"BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP64]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.DP",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of floating point operations that produce 32 bit single precision results [This event is alias to FP_FLOPS_RETIRED.SP]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.FP32",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of floating point operations that produce 64 bit double precision results [This event is alias to FP_FLOPS_RETIRED.DP]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.FP64",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP32]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.SP",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the total number of floating point retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.128B_DP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 128 bit single precision floating point. This may be SSE or AVX.128 operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.128B_SP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 256 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.256B_DP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 32bit single precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.32B_SP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 64 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.64B_DP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer store data port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.STD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
@@ -221,9 +295,9 @@
},
{
"BriefDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x8",
"Unit": "cpu_atom"
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
index f3b7b211afb5..b6c52f7385fc 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Clears due to Unknown Branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.MS_BUSY",
"SampleAfterValue": "500009",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
@@ -44,15 +49,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "DSB_FILL.FB_STALL_OT",
- "EventCode": "0x62",
- "EventName": "DSB_FILL.FB_STALL_OT",
- "SampleAfterValue": "1000003",
- "UMask": "0x10",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Retired ANT branches",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_ANT",
"MSRIndex": "0x3F7",
@@ -65,6 +63,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -77,6 +76,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -89,15 +89,16 @@
},
{
"BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x10",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -110,6 +111,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -122,6 +124,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -134,6 +137,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -146,6 +150,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -158,6 +163,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -170,6 +176,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -182,6 +189,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -194,6 +202,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -206,6 +215,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -218,6 +228,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -230,6 +241,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -242,6 +254,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -254,6 +267,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -266,6 +280,7 @@
},
{
"BriefDescription": "Mispredicted Retired ANT branches",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.MISP_ANT",
"MSRIndex": "0x3F7",
@@ -278,6 +293,7 @@
},
{
"BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
@@ -289,6 +305,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -301,6 +318,7 @@
},
{
"BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
@@ -312,6 +330,7 @@
},
{
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"SampleAfterValue": "200003",
@@ -320,6 +339,7 @@
},
{
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"SampleAfterValue": "200003",
@@ -328,6 +348,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_DATA.STALLS",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
@@ -337,6 +358,7 @@
},
{
"BriefDescription": "ICACHE_DATA.STALL_PERIODS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x80",
@@ -346,16 +368,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "EventCode": "0x83",
- "EventName": "ICACHE_TAG.HIT",
- "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
- "SampleAfterValue": "200003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
@@ -365,6 +379,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -375,6 +390,7 @@
},
{
"BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
@@ -385,6 +401,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
@@ -394,6 +411,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_ANY",
@@ -404,6 +422,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_OK",
@@ -414,6 +433,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -423,6 +443,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES_ANY",
@@ -433,6 +454,7 @@
},
{
"BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -444,6 +466,7 @@
},
{
"BriefDescription": "Uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the number of uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
@@ -452,16 +475,18 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
- "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. The count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. The count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
@@ -472,6 +497,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
@@ -483,6 +509,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
@@ -492,6 +519,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -502,6 +530,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
index 617d0e255fd5..b464a8ab32ca 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.ANY_AT_RET",
"SampleAfterValue": "1000003",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_BOUND_AT_RET",
"SampleAfterValue": "1000003",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_MISS_AT_RET",
"SampleAfterValue": "1000003",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.OTHER_AT_RET",
"PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.PGWALK_AT_RET",
"SampleAfterValue": "1000003",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.ST_ADDR_AT_RET",
"SampleAfterValue": "1000003",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "20003",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
@@ -85,6 +95,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
@@ -94,6 +105,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
@@ -103,6 +115,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
@@ -113,6 +126,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "9",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
@@ -122,23 +136,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "MEMORY_ORDERING.MD_NUKE",
- "EventCode": "0x09",
- "EventName": "MEMORY_ORDERING.MD_NUKE",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts the number of memory ordering machine clears due to memory renaming.",
- "EventCode": "0x09",
- "EventName": "MEMORY_ORDERING.MRN_NUKE",
- "SampleAfterValue": "100003",
- "UMask": "0x2",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
@@ -152,6 +151,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -165,6 +165,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -178,6 +179,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048",
@@ -191,6 +193,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -204,6 +207,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -217,6 +221,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -230,6 +235,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -243,6 +249,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -256,6 +263,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -269,6 +277,7 @@
},
{
"BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Counter": "0",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
@@ -280,24 +289,25 @@
},
{
"BriefDescription": "Counts misaligned loads that are 4K page splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts misaligned stores that are 4K page splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x4",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -308,6 +318,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -318,6 +329,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -328,6 +340,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -338,6 +351,7 @@
},
{
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -346,6 +360,7 @@
},
{
"BriefDescription": "Cycles where data return is pending for a Demand Data Read request who miss L3 cache.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
@@ -356,21 +371,12 @@
},
{
"BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
"SampleAfterValue": "2000003",
"UMask": "0x10",
"Unit": "cpu_core"
- },
- {
- "BriefDescription": "Cycles where the core is waiting on at least 6 outstanding demand data read requests known to have missed the L3 cache.",
- "CounterMask": "6",
- "EventCode": "0x20",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "PublicDescription": "Cycles where the core is waiting on at least 6 outstanding demand data read requests known to have missed the L3 cache. Note that this event does not capture all elapsed cycles while the requests are outstanding - only cycles from when the requests were known to have missed the L3 cache.",
- "SampleAfterValue": "2000003",
- "UMask": "0x10",
- "Unit": "cpu_core"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/other.json b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
index 0bc2cb2eabb3..53d23d8decc6 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ASSISTS.PAGE_FAULT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.PAGE_FAULT",
"SampleAfterValue": "1000003",
@@ -9,16 +10,17 @@
},
{
"BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xe4",
"EventName": "LBR_INSERTS.ANY",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -29,6 +31,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -39,6 +42,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -49,6 +53,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -59,6 +64,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -69,6 +75,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -79,6 +86,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -89,6 +97,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -99,6 +108,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -109,6 +119,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa5",
"EventName": "RS.EMPTY",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
@@ -118,6 +129,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xa5",
@@ -129,7 +141,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "RS.EMPTY_RESOURCE",
+ "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa5",
"EventName": "RS.EMPTY_RESOURCE",
"SampleAfterValue": "1000003",
@@ -138,6 +151,7 @@
},
{
"BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x75",
"EventName": "SERIALIZATION.C01_MS_SCB",
"SampleAfterValue": "200003",
@@ -146,6 +160,7 @@
},
{
"BriefDescription": "Cycles the uncore cannot take further requests",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x2d",
"EventName": "XQ.FULL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
index 5ff4a7a32250..bc806c7330f4 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles when any of the dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_ACTIVE",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.DIV_ACTIVE",
@@ -20,6 +22,7 @@
},
{
"BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.IDIV_ACTIVE",
@@ -29,6 +32,7 @@
},
{
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.ANY",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
@@ -38,15 +42,16 @@
},
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PEBS": "1",
"PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
"SampleAfterValue": "200003",
"Unit": "cpu_atom"
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -56,15 +61,16 @@
},
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x7e",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -75,6 +81,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -85,15 +92,16 @@
},
{
"BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfe",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -104,15 +112,16 @@
},
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xbf",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -123,15 +132,16 @@
},
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xeb",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -142,34 +152,35 @@
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb",
"Unit": "cpu_atom"
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf9",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -180,15 +191,16 @@
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf7",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -199,6 +211,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -209,15 +222,16 @@
},
{
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PEBS": "1",
"PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
"SampleAfterValue": "200003",
"Unit": "cpu_atom"
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -227,6 +241,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST",
"PEBS": "1",
@@ -236,15 +251,16 @@
},
{
"BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x7e",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -255,6 +271,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_COST",
"PEBS": "1",
@@ -264,6 +281,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -274,6 +292,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST",
"PEBS": "1",
@@ -283,15 +302,16 @@
},
{
"BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfe",
"Unit": "cpu_atom"
},
{
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -302,6 +322,7 @@
},
{
"BriefDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_COST",
"PEBS": "1",
@@ -311,15 +332,16 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xeb",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -330,15 +352,16 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Mispredicted indirect CALL retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -349,6 +372,7 @@
},
{
"BriefDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST",
"PEBS": "1",
@@ -358,6 +382,7 @@
},
{
"BriefDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_COST",
"PEBS": "1",
@@ -367,15 +392,16 @@
},
{
"BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x80",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -386,6 +412,7 @@
},
{
"BriefDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST",
"PEBS": "1",
@@ -395,6 +422,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -405,15 +433,16 @@
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf7",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET_COST",
"PEBS": "1",
@@ -423,6 +452,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C01",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
@@ -432,6 +462,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C02",
"PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
@@ -441,6 +472,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C0_WAIT",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
@@ -450,6 +482,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
"UMask": "0x2",
@@ -457,6 +490,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"SampleAfterValue": "2000003",
@@ -464,6 +498,7 @@
},
{
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -473,6 +508,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
@@ -482,6 +518,7 @@
},
{
"BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.PAUSE",
"SampleAfterValue": "2000003",
@@ -490,6 +527,7 @@
},
{
"BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xec",
@@ -500,6 +538,7 @@
},
{
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -509,6 +548,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"UMask": "0x3",
@@ -516,6 +556,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -524,6 +565,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
@@ -533,6 +575,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
@@ -542,6 +585,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"UMask": "0x2",
@@ -549,6 +593,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -557,6 +602,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003",
@@ -564,6 +610,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -572,6 +619,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -581,6 +629,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -590,6 +639,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "16",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -599,6 +649,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -608,6 +659,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -617,6 +669,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -626,6 +679,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -634,7 +688,17 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles total of 2 or 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_3_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -644,6 +708,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -653,6 +718,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -662,6 +728,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
@@ -671,6 +738,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
@@ -681,6 +749,7 @@
},
{
"BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
@@ -690,6 +759,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -699,6 +769,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"SampleAfterValue": "2000003",
@@ -707,6 +778,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -716,14 +788,15 @@
},
{
"BriefDescription": "Counts the number of instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -733,6 +806,7 @@
},
{
"BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -742,6 +816,7 @@
},
{
"BriefDescription": "Retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBS": "1",
@@ -752,6 +827,7 @@
},
{
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
@@ -761,6 +837,7 @@
},
{
"BriefDescription": "Iterations of Repeat string retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.REP_ITERATION",
"PEBS": "1",
@@ -770,17 +847,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
- "CounterMask": "1",
- "EventCode": "0xad",
- "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
- "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
- "SampleAfterValue": "2000003",
- "UMask": "0x3",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xad",
@@ -792,6 +860,7 @@
},
{
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
@@ -800,16 +869,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
- "EventCode": "0xad",
- "EventName": "INT_MISC.RAT_STALLS",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
- "SampleAfterValue": "1000003",
- "UMask": "0x8",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -819,6 +880,7 @@
},
{
"BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"MSRIndex": "0x3F7",
@@ -829,6 +891,7 @@
},
{
"BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.UOP_DROPPING",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
@@ -838,6 +901,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.128BIT",
"SampleAfterValue": "1000003",
@@ -846,6 +910,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.256BIT",
"SampleAfterValue": "1000003",
@@ -854,6 +919,7 @@
},
{
"BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.ADD_128",
"PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
@@ -863,6 +929,7 @@
},
{
"BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.ADD_256",
"PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
@@ -872,6 +939,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.MUL_256",
"SampleAfterValue": "1000003",
@@ -880,6 +948,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.SHUFFLES",
"SampleAfterValue": "1000003",
@@ -888,6 +957,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.VNNI_128",
"SampleAfterValue": "1000003",
@@ -896,6 +966,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.VNNI_256",
"SampleAfterValue": "1000003",
@@ -904,15 +975,16 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x4",
"Unit": "cpu_atom"
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
@@ -922,15 +994,16 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -940,15 +1013,16 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -957,7 +1031,18 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -968,6 +1053,7 @@
},
{
"BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
@@ -978,6 +1064,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -986,7 +1073,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the total number of machine clears for any reason including, but not limited to, memory ordering, memory disambiguation, SMC, and FP assist.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.ANY",
+ "SampleAfterValue": "20003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -998,6 +1094,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"SampleAfterValue": "20003",
@@ -1005,7 +1102,17 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of machines clears due to memory renaming.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MRN_NUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"SampleAfterValue": "20003",
@@ -1014,6 +1121,7 @@
},
{
"BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -1022,6 +1130,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20003",
@@ -1030,6 +1139,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -1039,6 +1149,7 @@
},
{
"BriefDescription": "LFENCE instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe0",
"EventName": "MISC2_RETIRED.LFENCE",
"PublicDescription": "number of LFENCE retired instructions",
@@ -1048,15 +1159,26 @@
},
{
"BriefDescription": "Counts the number of Last Branch Record (LBR) entries. Requires LBRs to be enabled and configured in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"SampleAfterValue": "100003",
@@ -1065,6 +1187,7 @@
},
{
"BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
@@ -1074,6 +1197,7 @@
},
{
"BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "Counter": "0",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BAD_SPEC_SLOTS",
"PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
@@ -1083,6 +1207,7 @@
},
{
"BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "Counter": "0",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
"PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
@@ -1092,6 +1217,7 @@
},
{
"BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
"SampleAfterValue": "10000003",
@@ -1100,6 +1226,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -1108,6 +1235,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -1117,6 +1245,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
@@ -1125,6 +1254,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL_P",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]",
@@ -1133,6 +1263,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Fast Nukes such as Memory Ordering Machine clears and MRN nukes",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
"SampleAfterValue": "1000003",
@@ -1141,6 +1272,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
"SampleAfterValue": "1000003",
@@ -1149,6 +1281,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
"SampleAfterValue": "1000003",
@@ -1157,6 +1290,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
"SampleAfterValue": "1000003",
@@ -1165,6 +1299,7 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003",
@@ -1172,6 +1307,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to due to certain allocation restrictions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
"SampleAfterValue": "1000003",
@@ -1180,6 +1316,7 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL_P",
"SampleAfterValue": "1000003",
@@ -1187,6 +1324,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). This could be caused by RSV full or load/store buffer block.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -1195,6 +1333,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC and FPC RAT stalls - which can be due to the FIQ and IEC reservation station stall (integer, FP and SIMD scheduler not being able to accept another uop. )",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -1203,6 +1342,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to mrbl stall. A 'marble' refers to a physical register file entry, also known as the physical destination (PDST).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
"SampleAfterValue": "1000003",
@@ -1211,6 +1351,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to ROB full",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
"SampleAfterValue": "1000003",
@@ -1219,6 +1360,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
"SampleAfterValue": "1000003",
@@ -1227,6 +1369,7 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003",
@@ -1234,6 +1377,7 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL_P",
"SampleAfterValue": "1000003",
@@ -1241,6 +1385,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
"SampleAfterValue": "1000003",
@@ -1249,6 +1394,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTClear",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
"SampleAfterValue": "1000003",
@@ -1257,6 +1403,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ms",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
"SampleAfterValue": "1000003",
@@ -1265,6 +1412,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stall",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
"SampleAfterValue": "1000003",
@@ -1273,6 +1421,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
"SampleAfterValue": "1000003",
@@ -1281,6 +1430,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
"SampleAfterValue": "1000003",
@@ -1289,6 +1439,7 @@
},
{
"BriefDescription": "This event is deprecated. [This event is alias to TOPDOWN_FE_BOUND.ITLB_MISS]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
@@ -1298,6 +1449,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to itlb miss [This event is alias to TOPDOWN_FE_BOUND.ITLB]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB_MISS",
"SampleAfterValue": "1000003",
@@ -1306,6 +1458,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend that do not categorize into any other common frontend stall",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
"SampleAfterValue": "1000003",
@@ -1314,6 +1467,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
"SampleAfterValue": "1000003",
@@ -1321,23 +1475,24 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL_P]",
+ "BriefDescription": "Counts the number of consumed retirement slots. [This event is alias to TOPDOWN_RETIRING.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x72",
"EventName": "TOPDOWN_RETIRING.ALL",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL]",
+ "BriefDescription": "Counts the number of consumed retirement slots. [This event is alias to TOPDOWN_RETIRING.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x72",
"EventName": "TOPDOWN_RETIRING.ALL_P",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Number of non dec-by-all uops decoded by decoder",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UOPS_DECODED.DEC0_UOPS",
"PublicDescription": "This event counts the number of not dec-by-all uops decoded by decoder 0.",
@@ -1347,6 +1502,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_0",
"PublicDescription": "Number of uops dispatch to execution port 0.",
@@ -1356,6 +1512,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_1",
"PublicDescription": "Number of uops dispatch to execution port 1.",
@@ -1365,6 +1522,7 @@
},
{
"BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_2_3_10",
"PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
@@ -1374,6 +1532,7 @@
},
{
"BriefDescription": "Uops executed on ports 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
@@ -1383,6 +1542,7 @@
},
{
"BriefDescription": "Uops executed on ports 5 and 11",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_5_11",
"PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
@@ -1392,6 +1552,7 @@
},
{
"BriefDescription": "Uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_6",
"PublicDescription": "Number of uops dispatch to execution port 6.",
@@ -1401,6 +1562,7 @@
},
{
"BriefDescription": "Uops executed on ports 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
@@ -1410,6 +1572,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Counts the number of uops executed from any thread.",
@@ -1419,6 +1582,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -1429,6 +1593,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -1439,6 +1604,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -1449,6 +1615,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -1459,6 +1626,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
@@ -1469,6 +1637,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
@@ -1479,6 +1648,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
@@ -1489,6 +1659,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
@@ -1499,6 +1670,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.STALLS",
@@ -1510,6 +1682,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
@@ -1518,6 +1691,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -1527,6 +1701,7 @@
},
{
"BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
@@ -1535,6 +1710,7 @@
},
{
"BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xae",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -1544,6 +1720,7 @@
},
{
"BriefDescription": "UOPS_ISSUED.CYCLES",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xae",
"EventName": "UOPS_ISSUED.CYCLES",
@@ -1552,26 +1729,16 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
- "CounterMask": "1",
- "EventCode": "0xae",
- "EventName": "UOPS_ISSUED.STALLS",
- "Invert": "1",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Counts the total number of uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Cycles with retired uop(s).",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.CYCLES",
@@ -1582,6 +1749,7 @@
},
{
"BriefDescription": "Retired uops except the last uop of each instruction.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.HEAVY",
"PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
@@ -1591,24 +1759,25 @@
},
{
"BriefDescription": "Counts the number of integer divide uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x10",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "UOPS_RETIRED.MS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"MSRIndex": "0x3F7",
@@ -1619,6 +1788,7 @@
},
{
"BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric. Software can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
@@ -1628,6 +1798,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.STALLS",
@@ -1638,21 +1809,10 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "EventCode": "0xc2",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "Invert": "1",
- "PublicDescription": "Counts the number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
- "SampleAfterValue": "1000003",
- "UMask": "0x2",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Counts the number of x87 uops retired, includes those in ms flows",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x2",
"Unit": "cpu_atom"
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json
index 188843be4caf..f294852dfbe6 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of all entries allocated. Includes also retries.",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_HAC_CBO_TOR_ALLOCATION.ALL",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Asserted on coherent DRD + DRdPref allocations into the queue. Cacheable only",
+ "Counter": "0,1",
"EventCode": "0x35",
"EventName": "UNC_HAC_CBO_TOR_ALLOCATION.DRD",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json
index 901d8510f90f..a2f4386a8379 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
"PerPkg": "1",
@@ -9,14 +10,17 @@
},
{
"BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_HAC_ARB_COH_TRK_REQUESTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "HAC_ARB"
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_HAC_ARB_REQ_TRK_REQUEST.DRD",
"PerPkg": "1",
@@ -25,6 +29,7 @@
},
{
"BriefDescription": "Number of all CMI transactions",
+ "Counter": "0,1",
"EventCode": "0x8A",
"EventName": "UNC_HAC_ARB_TRANSACTIONS.ALL",
"PerPkg": "1",
@@ -33,6 +38,7 @@
},
{
"BriefDescription": "Number of all CMI reads",
+ "Counter": "0,1",
"EventCode": "0x8A",
"EventName": "UNC_HAC_ARB_TRANSACTIONS.READS",
"PerPkg": "1",
@@ -41,6 +47,7 @@
},
{
"BriefDescription": "Number of all CMI writes not including Mflush",
+ "Counter": "0,1",
"EventCode": "0x8A",
"EventName": "UNC_HAC_ARB_TRANSACTIONS.WRITES",
"PerPkg": "1",
@@ -49,6 +56,7 @@
},
{
"BriefDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_HAC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json
index c9d248d1042e..783a4f7fd05b 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts every CAS read command sent from the Memory Controller 0 to DRAM (sum of all channels).",
+ "Counter": "0",
"EventCode": "0xff",
"EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Counts every read and write request entering the Memory Controller 0.",
+ "Counter": "2",
"EventCode": "0xff",
"EventName": "UNC_MC0_TOTAL_REQCOUNT_FREERUN",
"PerPkg": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Counts every CAS write command sent from the Memory Controller 0 to DRAM (sum of all channels).",
+ "Counter": "1",
"EventCode": "0xff",
"EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Counts every CAS read command sent from the Memory Controller 1 to DRAM (sum of all channels).",
+ "Counter": "3",
"EventCode": "0xff",
"EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Counts every read and write request entering the Memory Controller 1.",
+ "Counter": "5",
"EventCode": "0xff",
"EventName": "UNC_MC1_TOTAL_REQCOUNT_FREERUN",
"PerPkg": "1",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Counts every CAS write command sent from the Memory Controller 1 to DRAM (sum of all channels).",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "ACT command for a read request sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x24",
"EventName": "UNC_M_ACT_COUNT_RD",
"PerPkg": "1",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "ACT command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x26",
"EventName": "UNC_M_ACT_COUNT_TOTAL",
"PerPkg": "1",
@@ -69,6 +77,7 @@
},
{
"BriefDescription": "ACT command for a write request sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x25",
"EventName": "UNC_M_ACT_COUNT_WR",
"PerPkg": "1",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Read CAS command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x22",
"EventName": "UNC_M_CAS_COUNT_RD",
"PerPkg": "1",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Write CAS command sent to DRAM",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x23",
"EventName": "UNC_M_CAS_COUNT_WR",
"PerPkg": "1",
@@ -90,6 +101,7 @@
},
{
"BriefDescription": "PRE command sent to DRAM due to page table idle timer expiration",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x28",
"EventName": "UNC_M_PRE_COUNT_IDLE",
"PerPkg": "1",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "PRE command sent to DRAM for a read/write request",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x27",
"EventName": "UNC_M_PRE_COUNT_PAGE_MISS",
"PerPkg": "1",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Number of bytes read from DRAM, in 32B chunks. Counter increments by 1 after receiving 32B chunk data.",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x3A",
"EventName": "UNC_M_RD_DATA",
"PerPkg": "1",
@@ -111,6 +125,7 @@
},
{
"BriefDescription": "Total number of read and write byte transfers to/from DRAM, in 32B chunks. Counter increments by 1 after sending or receiving 32B chunk data.",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x3C",
"EventName": "UNC_M_TOTAL_DATA",
"PerPkg": "1",
@@ -118,6 +133,7 @@
},
{
"BriefDescription": "Number of bytes written to DRAM, in 32B chunks. Counter increments by 1 after sending 32B chunk data.",
+ "Counter": "0,1,2,3,4",
"EventCode": "0x3B",
"EventName": "UNC_M_WR_DATA",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json
index 2af92e43b28a..1ac5b5ef8094 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
index 55798e64c58a..305b96b26a4e 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "200003",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200003",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -54,6 +60,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -63,6 +70,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -72,6 +80,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -90,6 +100,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -99,6 +110,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
@@ -108,6 +120,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all pages sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -135,6 +150,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "2000003",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
"SampleAfterValue": "1000003",
@@ -223,6 +248,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -231,6 +257,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
@@ -240,6 +267,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -250,6 +278,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -259,6 +288,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -268,6 +298,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -277,6 +308,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -286,6 +318,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -295,6 +328,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -304,6 +338,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding for iside in PMH every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for iside in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals. Walks could be counted by edge detecting on this event, but would count restarted suspended walks.",
@@ -313,6 +348,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
@@ -322,6 +358,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.DTLB_MISS_AT_RET",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
index 5113a4e059e4..b90026df2ce7 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles L1D locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles L1D and L2 locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D_L2",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1D cache lines replaced in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_EVICT",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1D cache lines allocated in the M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_REPL",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1D snoop eviction of cache lines in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_SNOOP_EVICT",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1 data cache lines allocated",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.REPL",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "All references to the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "L1D_ALL_REF.ANY",
"SampleAfterValue": "2000000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "L1 data cacheable reads and writes",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "L1D_ALL_REF.CACHEABLE",
"SampleAfterValue": "2000000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "L1 data cache read in E state",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.E_STATE",
"SampleAfterValue": "2000000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "L1 data cache read in I state (misses)",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.I_STATE",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "L1 data cache reads",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.MESI",
"SampleAfterValue": "2000000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "L1 data cache read in M state",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.M_STATE",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "L1 data cache read in S state",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.S_STATE",
"SampleAfterValue": "2000000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "L1 data cache load locks in E state",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.E_STATE",
"SampleAfterValue": "2000000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "L1 data cache load lock hits",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.HIT",
"SampleAfterValue": "2000000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "L1 data cache load locks in M state",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.M_STATE",
"SampleAfterValue": "2000000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "L1 data cache load locks in S state",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.S_STATE",
"SampleAfterValue": "2000000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "L1D load lock accepted in fill buffer",
+ "Counter": "0,1",
"EventCode": "0x53",
"EventName": "L1D_CACHE_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "L1D prefetch load lock accepted in fill buffer",
+ "Counter": "0,1",
"EventCode": "0x52",
"EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "L1 data cache stores in E state",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "L1D_CACHE_ST.E_STATE",
"SampleAfterValue": "2000000",
@@ -141,6 +161,7 @@
},
{
"BriefDescription": "L1 data cache stores in M state",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "L1D_CACHE_ST.M_STATE",
"SampleAfterValue": "2000000",
@@ -148,6 +169,7 @@
},
{
"BriefDescription": "L1 data cache stores in S state",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "L1D_CACHE_ST.S_STATE",
"SampleAfterValue": "2000000",
@@ -155,6 +177,7 @@
},
{
"BriefDescription": "L1D hardware prefetch misses",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.MISS",
"SampleAfterValue": "200000",
@@ -162,6 +185,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.REQUESTS",
"SampleAfterValue": "200000",
@@ -169,6 +193,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests triggered",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.TRIGGERS",
"SampleAfterValue": "200000",
@@ -176,6 +201,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.E_STATE",
"SampleAfterValue": "100000",
@@ -183,6 +209,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.I_STATE",
"SampleAfterValue": "100000",
@@ -190,6 +217,7 @@
},
{
"BriefDescription": "All L1 writebacks to L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.MESI",
"SampleAfterValue": "100000",
@@ -197,6 +225,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.M_STATE",
"SampleAfterValue": "100000",
@@ -204,6 +233,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.S_STATE",
"SampleAfterValue": "100000",
@@ -211,6 +241,7 @@
},
{
"BriefDescription": "All L2 data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.ANY",
"SampleAfterValue": "200000",
@@ -218,6 +249,7 @@
},
{
"BriefDescription": "L2 data demand loads in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
"SampleAfterValue": "200000",
@@ -225,6 +257,7 @@
},
{
"BriefDescription": "L2 data demand loads in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
"SampleAfterValue": "200000",
@@ -232,6 +265,7 @@
},
{
"BriefDescription": "L2 data demand requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.MESI",
"SampleAfterValue": "200000",
@@ -239,6 +273,7 @@
},
{
"BriefDescription": "L2 data demand loads in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
"SampleAfterValue": "200000",
@@ -246,6 +281,7 @@
},
{
"BriefDescription": "L2 data demand loads in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
"SampleAfterValue": "200000",
@@ -253,6 +289,7 @@
},
{
"BriefDescription": "L2 data prefetches in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
@@ -260,6 +297,7 @@
},
{
"BriefDescription": "L2 data prefetches in the I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
@@ -267,6 +305,7 @@
},
{
"BriefDescription": "All L2 data prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
"SampleAfterValue": "200000",
@@ -274,6 +313,7 @@
},
{
"BriefDescription": "L2 data prefetches in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
@@ -281,6 +321,7 @@
},
{
"BriefDescription": "L2 data prefetches in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
@@ -288,6 +329,7 @@
},
{
"BriefDescription": "L2 lines allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
@@ -295,6 +337,7 @@
},
{
"BriefDescription": "L2 lines allocated in the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E_STATE",
"SampleAfterValue": "100000",
@@ -302,6 +345,7 @@
},
{
"BriefDescription": "L2 lines allocated in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S_STATE",
"SampleAfterValue": "100000",
@@ -309,6 +353,7 @@
},
{
"BriefDescription": "L2 lines evicted",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.ANY",
"SampleAfterValue": "100000",
@@ -316,6 +361,7 @@
},
{
"BriefDescription": "L2 lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100000",
@@ -323,6 +369,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100000",
@@ -330,6 +377,7 @@
},
{
"BriefDescription": "L2 lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
"SampleAfterValue": "100000",
@@ -337,6 +385,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
"SampleAfterValue": "100000",
@@ -344,6 +393,7 @@
},
{
"BriefDescription": "L2 instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCHES",
"SampleAfterValue": "200000",
@@ -351,6 +401,7 @@
},
{
"BriefDescription": "L2 instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_HIT",
"SampleAfterValue": "200000",
@@ -358,6 +409,7 @@
},
{
"BriefDescription": "L2 instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_MISS",
"SampleAfterValue": "200000",
@@ -365,6 +417,7 @@
},
{
"BriefDescription": "L2 load hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_HIT",
"SampleAfterValue": "200000",
@@ -372,6 +425,7 @@
},
{
"BriefDescription": "L2 load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_MISS",
"SampleAfterValue": "200000",
@@ -379,6 +433,7 @@
},
{
"BriefDescription": "L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LOADS",
"SampleAfterValue": "200000",
@@ -386,6 +441,7 @@
},
{
"BriefDescription": "All L2 misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200000",
@@ -393,6 +449,7 @@
},
{
"BriefDescription": "All L2 prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCHES",
"SampleAfterValue": "200000",
@@ -400,6 +457,7 @@
},
{
"BriefDescription": "L2 prefetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_HIT",
"SampleAfterValue": "200000",
@@ -407,6 +465,7 @@
},
{
"BriefDescription": "L2 prefetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_MISS",
"SampleAfterValue": "200000",
@@ -414,6 +473,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200000",
@@ -421,6 +481,7 @@
},
{
"BriefDescription": "L2 RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFOS",
"SampleAfterValue": "200000",
@@ -428,6 +489,7 @@
},
{
"BriefDescription": "L2 RFO hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200000",
@@ -435,6 +497,7 @@
},
{
"BriefDescription": "L2 RFO misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200000",
@@ -442,6 +505,7 @@
},
{
"BriefDescription": "All L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.ANY",
"SampleAfterValue": "200000",
@@ -449,6 +513,7 @@
},
{
"BriefDescription": "L2 fill transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.FILL",
"SampleAfterValue": "200000",
@@ -456,6 +521,7 @@
},
{
"BriefDescription": "L2 instruction fetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.IFETCH",
"SampleAfterValue": "200000",
@@ -463,6 +529,7 @@
},
{
"BriefDescription": "L1D writeback to L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.L1D_WB",
"SampleAfterValue": "200000",
@@ -470,6 +537,7 @@
},
{
"BriefDescription": "L2 Load transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.LOAD",
"SampleAfterValue": "200000",
@@ -477,6 +545,7 @@
},
{
"BriefDescription": "L2 prefetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.PREFETCH",
"SampleAfterValue": "200000",
@@ -484,6 +553,7 @@
},
{
"BriefDescription": "L2 RFO transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.RFO",
"SampleAfterValue": "200000",
@@ -491,6 +561,7 @@
},
{
"BriefDescription": "L2 writeback to LLC transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.WB",
"SampleAfterValue": "200000",
@@ -498,6 +569,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.E_STATE",
"SampleAfterValue": "100000",
@@ -505,6 +577,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.HIT",
"SampleAfterValue": "100000",
@@ -512,6 +585,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.I_STATE",
"SampleAfterValue": "100000",
@@ -519,6 +593,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.MESI",
"SampleAfterValue": "100000",
@@ -526,6 +601,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.M_STATE",
"SampleAfterValue": "100000",
@@ -533,6 +609,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.S_STATE",
"SampleAfterValue": "100000",
@@ -540,6 +617,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.HIT",
"SampleAfterValue": "100000",
@@ -547,6 +625,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.I_STATE",
"SampleAfterValue": "100000",
@@ -554,6 +633,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.MESI",
"SampleAfterValue": "100000",
@@ -561,6 +641,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.M_STATE",
"SampleAfterValue": "100000",
@@ -568,6 +649,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.S_STATE",
"SampleAfterValue": "100000",
@@ -575,6 +657,7 @@
},
{
"BriefDescription": "Longest latency cache miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100000",
@@ -582,6 +665,7 @@
},
{
"BriefDescription": "Longest latency cache reference",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "200000",
@@ -589,6 +673,7 @@
},
{
"BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
"MSRIndex": "0x3F6",
@@ -598,6 +683,7 @@
},
{
"BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
"MSRIndex": "0x3F6",
@@ -608,6 +694,7 @@
},
{
"BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
"MSRIndex": "0x3F6",
@@ -618,6 +705,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
"MSRIndex": "0x3F6",
@@ -628,6 +716,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
"MSRIndex": "0x3F6",
@@ -638,6 +727,7 @@
},
{
"BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
"MSRIndex": "0x3F6",
@@ -648,6 +738,7 @@
},
{
"BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
"MSRIndex": "0x3F6",
@@ -658,6 +749,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
"MSRIndex": "0x3F6",
@@ -668,6 +760,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
"MSRIndex": "0x3F6",
@@ -678,6 +771,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
"MSRIndex": "0x3F6",
@@ -688,6 +782,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
"MSRIndex": "0x3F6",
@@ -698,6 +793,7 @@
},
{
"BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
"MSRIndex": "0x3F6",
@@ -708,6 +804,7 @@
},
{
"BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
"MSRIndex": "0x3F6",
@@ -718,6 +815,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
"MSRIndex": "0x3F6",
@@ -728,6 +826,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
"MSRIndex": "0x3F6",
@@ -738,6 +837,7 @@
},
{
"BriefDescription": "Instructions retired which contains a load (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LOADS",
"PEBS": "1",
@@ -746,6 +846,7 @@
},
{
"BriefDescription": "Instructions retired which contains a store (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.STORES",
"PEBS": "1",
@@ -754,6 +855,7 @@
},
{
"BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -762,6 +864,7 @@
},
{
"BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L1D_HIT",
"PEBS": "1",
@@ -770,6 +873,7 @@
},
{
"BriefDescription": "Retired loads that hit the L2 cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
"PEBS": "1",
@@ -778,6 +882,7 @@
},
{
"BriefDescription": "Retired loads that miss the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_MISS",
"PEBS": "1",
@@ -786,6 +891,7 @@
},
{
"BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
"PEBS": "1",
@@ -794,6 +900,7 @@
},
{
"BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
"PEBS": "1",
@@ -802,6 +909,7 @@
},
{
"BriefDescription": "Load instructions retired with a data source of local DRAM or locally homed remote hitm (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.LOCAL_DRAM",
"PEBS": "1",
@@ -810,6 +918,7 @@
},
{
"BriefDescription": "Load instructions retired that HIT modified data in sibling core (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.OTHER_CORE_L2_HITM",
"PEBS": "1",
@@ -818,6 +927,7 @@
},
{
"BriefDescription": "Load instructions retired remote cache HIT data source (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT",
"PEBS": "1",
@@ -826,6 +936,7 @@
},
{
"BriefDescription": "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.REMOTE_DRAM",
"PEBS": "1",
@@ -834,6 +945,7 @@
},
{
"BriefDescription": "Load instructions retired IO (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.UNCACHEABLE",
"PEBS": "1",
@@ -842,6 +954,7 @@
},
{
"BriefDescription": "Offcore L1 data cache writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
"SampleAfterValue": "100000",
@@ -849,6 +962,7 @@
},
{
"BriefDescription": "Offcore requests blocked due to Super Queue full",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_SQ_FULL",
"SampleAfterValue": "100000",
@@ -856,6 +970,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -865,6 +980,7 @@
},
{
"BriefDescription": "All offcore data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -874,6 +990,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -883,6 +1000,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -892,6 +1010,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -901,6 +1020,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -910,6 +1030,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -919,6 +1040,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -928,6 +1050,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -937,6 +1060,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -946,6 +1070,7 @@
},
{
"BriefDescription": "Offcore data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -955,6 +1080,7 @@
},
{
"BriefDescription": "Offcore data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -964,6 +1090,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -973,6 +1100,7 @@
},
{
"BriefDescription": "All offcore code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -982,6 +1110,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -991,6 +1120,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1000,6 +1130,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1009,6 +1140,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1018,6 +1150,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1027,6 +1160,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1036,6 +1170,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1045,6 +1180,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1054,6 +1190,7 @@
},
{
"BriefDescription": "Offcore code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1063,6 +1200,7 @@
},
{
"BriefDescription": "Offcore code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1072,6 +1210,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1081,6 +1220,7 @@
},
{
"BriefDescription": "All offcore requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1090,6 +1230,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1099,6 +1240,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1108,6 +1250,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1117,6 +1260,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1126,6 +1270,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1135,6 +1280,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1144,6 +1290,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1153,6 +1300,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1162,6 +1310,7 @@
},
{
"BriefDescription": "Offcore requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1171,6 +1320,7 @@
},
{
"BriefDescription": "Offcore requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1180,6 +1330,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1189,6 +1340,7 @@
},
{
"BriefDescription": "All offcore RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1198,6 +1350,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1207,6 +1360,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1216,6 +1370,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1225,6 +1380,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1234,6 +1390,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1243,6 +1400,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1252,6 +1410,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1261,6 +1420,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1270,6 +1430,7 @@
},
{
"BriefDescription": "Offcore RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1279,6 +1440,7 @@
},
{
"BriefDescription": "Offcore RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1288,6 +1450,7 @@
},
{
"BriefDescription": "Offcore writebacks to any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1297,6 +1460,7 @@
},
{
"BriefDescription": "All offcore writebacks",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1306,6 +1470,7 @@
},
{
"BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1315,6 +1480,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1324,6 +1490,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1333,6 +1500,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1342,6 +1510,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1351,6 +1520,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1360,6 +1530,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1369,6 +1540,7 @@
},
{
"BriefDescription": "Offcore writebacks that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1378,6 +1550,7 @@
},
{
"BriefDescription": "Offcore writebacks that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1387,6 +1560,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1396,6 +1570,7 @@
},
{
"BriefDescription": "All offcore code or data read requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1405,6 +1580,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1414,6 +1590,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1423,6 +1600,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1432,6 +1610,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1441,6 +1620,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1450,6 +1630,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1459,6 +1640,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1468,6 +1650,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1477,6 +1660,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1486,6 +1670,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1495,6 +1680,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any cache_dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1504,6 +1690,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any location",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1513,6 +1700,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1522,6 +1710,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1531,6 +1720,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1540,6 +1730,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1549,6 +1740,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = local cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1558,6 +1750,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = local cache or dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1567,6 +1760,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1576,6 +1770,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = remote cache or dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1585,6 +1780,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1594,6 +1790,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1603,6 +1800,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1612,6 +1810,7 @@
},
{
"BriefDescription": "All offcore demand data requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1621,6 +1820,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1630,6 +1830,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1639,6 +1840,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1648,6 +1850,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1657,6 +1860,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1666,6 +1870,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1675,6 +1880,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1684,6 +1890,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1693,6 +1900,7 @@
},
{
"BriefDescription": "Offcore demand data requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1702,6 +1910,7 @@
},
{
"BriefDescription": "Offcore demand data requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1711,6 +1920,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1720,6 +1930,7 @@
},
{
"BriefDescription": "All offcore demand data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1729,6 +1940,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1738,6 +1950,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1747,6 +1960,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1756,6 +1970,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1765,6 +1980,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1774,6 +1990,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1783,6 +2000,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1792,6 +2010,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1801,6 +2020,7 @@
},
{
"BriefDescription": "Offcore demand data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1810,6 +2030,7 @@
},
{
"BriefDescription": "Offcore demand data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1819,6 +2040,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1828,6 +2050,7 @@
},
{
"BriefDescription": "All offcore demand code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1837,6 +2060,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1846,6 +2070,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1855,6 +2080,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1864,6 +2090,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1873,6 +2100,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1882,6 +2110,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1891,6 +2120,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1900,6 +2130,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1909,6 +2140,7 @@
},
{
"BriefDescription": "Offcore demand code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1918,6 +2150,7 @@
},
{
"BriefDescription": "Offcore demand code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1927,6 +2160,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1936,6 +2170,7 @@
},
{
"BriefDescription": "All offcore demand RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1945,6 +2180,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1954,6 +2190,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1963,6 +2200,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1972,6 +2210,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1981,6 +2220,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1990,6 +2230,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1999,6 +2240,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2008,6 +2250,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2017,6 +2260,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2026,6 +2270,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2035,6 +2280,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2044,6 +2290,7 @@
},
{
"BriefDescription": "All offcore other requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2053,6 +2300,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2062,6 +2310,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2071,6 +2320,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2080,6 +2330,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2089,6 +2340,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2098,6 +2350,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2107,6 +2360,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2116,6 +2370,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2125,6 +2380,7 @@
},
{
"BriefDescription": "Offcore other requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2134,6 +2390,7 @@
},
{
"BriefDescription": "Offcore other requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2143,6 +2400,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2152,6 +2410,7 @@
},
{
"BriefDescription": "All offcore prefetch data requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2161,6 +2420,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2170,6 +2430,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2179,6 +2440,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2188,6 +2450,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2197,6 +2460,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2206,6 +2470,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2215,6 +2480,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2224,6 +2490,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2233,6 +2500,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2242,6 +2510,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2251,6 +2520,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2260,6 +2530,7 @@
},
{
"BriefDescription": "All offcore prefetch data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2269,6 +2540,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2278,6 +2550,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2287,6 +2560,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2296,6 +2570,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2305,6 +2580,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2314,6 +2590,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2323,6 +2600,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2332,6 +2610,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2341,6 +2620,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2350,6 +2630,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2359,6 +2640,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2368,6 +2650,7 @@
},
{
"BriefDescription": "All offcore prefetch code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2377,6 +2660,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2386,6 +2670,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2395,6 +2680,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2404,6 +2690,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2413,6 +2700,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2422,6 +2710,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2431,6 +2720,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2440,6 +2730,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2449,6 +2740,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2458,6 +2750,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2467,6 +2760,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2476,6 +2770,7 @@
},
{
"BriefDescription": "All offcore prefetch RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2485,6 +2780,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2494,6 +2790,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2503,6 +2800,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2512,6 +2810,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2521,6 +2820,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2530,6 +2830,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2539,6 +2840,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2548,6 +2850,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2557,6 +2860,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2566,6 +2870,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2575,6 +2880,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2584,6 +2890,7 @@
},
{
"BriefDescription": "All offcore prefetch requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2593,6 +2900,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2602,6 +2910,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2611,6 +2920,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2620,6 +2930,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2629,6 +2940,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2638,6 +2950,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2647,6 +2960,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2656,6 +2970,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2665,6 +2980,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2674,6 +2990,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2683,6 +3000,7 @@
},
{
"BriefDescription": "Super Queue lock splits across a cache line",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "2000000",
@@ -2690,6 +3008,7 @@
},
{
"BriefDescription": "Loads delayed with at-Retirement block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.AT_RET",
"SampleAfterValue": "200000",
@@ -2697,6 +3016,7 @@
},
{
"BriefDescription": "Cacheable loads delayed with L1D block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.L1D_BLOCK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/counter.json b/tools/perf/pmu-events/arch/x86/nehalemep/counter.json
new file mode 100644
index 000000000000..ecf0795dceab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
index 196ae1d9b157..9bac9313b65c 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "X87 Floating point assists (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.ALL",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.OUTPUT",
"PEBS": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "MMX Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.MMX",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "SSE2 integer Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "SSE* FP double precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "SSE and SSE2 FP Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "SSE FP packed Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "SSE FP scalar Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "SSE* FP single precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Computational floating-point operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "All Floating Point to and from MMX transitions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.ANY",
"SampleAfterValue": "2000000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Transitions from MMX to Floating Point instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_FP",
"SampleAfterValue": "2000000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Transitions from Floating Point to MMX instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_MMX",
"SampleAfterValue": "2000000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "128 bit SIMD integer pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACK",
"SampleAfterValue": "200000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "128 bit SIMD integer arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "128 bit SIMD integer logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "128 bit SIMD integer multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "128 bit SIMD integer unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.UNPACK",
"SampleAfterValue": "200000",
@@ -151,6 +172,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACK",
"SampleAfterValue": "200000",
@@ -158,6 +180,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -165,6 +188,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -172,6 +196,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit packed multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -179,6 +204,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -186,6 +212,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -193,6 +220,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.UNPACK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json b/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
index f7f28510e3ae..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MACRO_INSTS.DECODED",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Macro-fused instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "MACRO_INSTS.FUSIONS_DECODED",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Two Uop instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "TWO_UOP_INSTS_DECODED",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
index f810880a295e..dc732c8baf12 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Offcore data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Offcore data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Offcore code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -73,6 +81,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -82,6 +91,7 @@
},
{
"BriefDescription": "Offcore requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -91,6 +101,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -109,6 +121,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -118,6 +131,7 @@
},
{
"BriefDescription": "Offcore RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -127,6 +141,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -136,6 +151,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -145,6 +161,7 @@
},
{
"BriefDescription": "Offcore writebacks to any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -154,6 +171,7 @@
},
{
"BriefDescription": "Offcore writebacks that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -163,6 +181,7 @@
},
{
"BriefDescription": "Offcore writebacks to the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -172,6 +191,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -181,6 +201,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -190,6 +211,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -199,6 +221,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -208,6 +231,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -217,6 +241,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -226,6 +251,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any LLC miss",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -235,6 +261,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -244,6 +271,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -253,6 +281,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -262,6 +291,7 @@
},
{
"BriefDescription": "Offcore demand data requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -271,6 +301,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -280,6 +311,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -289,6 +321,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -298,6 +331,7 @@
},
{
"BriefDescription": "Offcore demand data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -307,6 +341,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -316,6 +351,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -325,6 +361,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -334,6 +371,7 @@
},
{
"BriefDescription": "Offcore demand code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -343,6 +381,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -352,6 +391,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -361,6 +401,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -370,6 +411,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -379,6 +421,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -388,6 +431,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -397,6 +441,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -406,6 +451,7 @@
},
{
"BriefDescription": "Offcore other requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -415,6 +461,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -424,6 +471,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -433,6 +481,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -442,6 +491,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -451,6 +501,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -460,6 +511,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -469,6 +521,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -478,6 +531,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -487,6 +541,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -496,6 +551,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -505,6 +561,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -514,6 +571,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -523,6 +581,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -532,6 +591,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -541,6 +601,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -550,6 +611,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -559,6 +621,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -568,6 +631,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -577,6 +641,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -586,6 +651,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -595,6 +661,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/other.json b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
index fb706cb51832..f6887b234b0e 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/other.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ES segment renames",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "ES_REG_RENAMES",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "I/O transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "IO_TRANSACTIONS",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.CYCLES_STALLED",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.HITS",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.MISSES",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.READS",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "LARGE_ITLB.HIT",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "All loads dispatched",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.ANY",
"SampleAfterValue": "2000000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "Loads dispatched from the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.MOB",
"SampleAfterValue": "2000000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "Loads dispatched that bypass the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Loads dispatched from stage 305",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS_DELAYED",
"SampleAfterValue": "2000000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "False dependencies due to partial address aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "PARTIAL_ADDRESS_ALIAS",
"SampleAfterValue": "200000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "All Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "SB_DRAIN.ANY",
"SampleAfterValue": "200000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "Segment rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "SEG_RENAME_STALLS",
"SampleAfterValue": "2000000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "Thread responded HIT to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HIT",
"SampleAfterValue": "100000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "Thread responded HITE to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITE",
"SampleAfterValue": "100000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "Thread responded HITM to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITM",
"SampleAfterValue": "100000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "Super Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xF6",
"EventName": "SQ_FULL_STALL_CYCLES",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json b/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
index c45f2ffa861e..869c84fa7c60 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles the divider is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Divide Operations executed",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Multiply operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.MUL",
"SampleAfterValue": "2000000",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "BACLEAR asserted with bad target address",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.BAD_TARGET",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "BACLEAR asserted, regardless of cause",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.CLEAR",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Instruction queue forced BACLEAR",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "BACLEAR_FORCE_IQ",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Early Branch Prediciton Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.EARLY",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.LATE",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "BPU_MISSED_CALL_RET",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Branch instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ANY",
"SampleAfterValue": "200000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "Conditional branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.COND",
"SampleAfterValue": "200000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT",
"SampleAfterValue": "200000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Unconditional call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "Indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "Indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "20000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "Call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NEAR_CALLS",
"SampleAfterValue": "20000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "All non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NON_CALLS",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "Indirect return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.RETURN_NEAR",
"SampleAfterValue": "20000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "Taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "Retired branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -152,6 +173,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -160,6 +182,7 @@
},
{
"BriefDescription": "Retired near call instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -168,6 +191,7 @@
},
{
"BriefDescription": "Mispredicted branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ANY",
"SampleAfterValue": "20000",
@@ -175,6 +199,7 @@
},
{
"BriefDescription": "Mispredicted conditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.COND",
"SampleAfterValue": "20000",
@@ -182,6 +207,7 @@
},
{
"BriefDescription": "Mispredicted unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT",
"SampleAfterValue": "20000",
@@ -189,6 +215,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -196,6 +223,7 @@
},
{
"BriefDescription": "Mispredicted indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -203,6 +231,7 @@
},
{
"BriefDescription": "Mispredicted indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "2000",
@@ -210,6 +239,7 @@
},
{
"BriefDescription": "Mispredicted call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NEAR_CALLS",
"SampleAfterValue": "2000",
@@ -217,6 +247,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NON_CALLS",
"SampleAfterValue": "20000",
@@ -224,6 +255,7 @@
},
{
"BriefDescription": "Mispredicted return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.RETURN_NEAR",
"SampleAfterValue": "2000",
@@ -231,6 +263,7 @@
},
{
"BriefDescription": "Mispredicted taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN",
"SampleAfterValue": "20000",
@@ -238,6 +271,7 @@
},
{
"BriefDescription": "Mispredicted near retired calls (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -246,11 +280,13 @@
},
{
"BriefDescription": "Reference cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 3",
"EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_P",
"SampleAfterValue": "100000",
@@ -258,17 +294,20 @@
},
{
"BriefDescription": "Cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Total CPU cycles",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
@@ -277,6 +316,7 @@
},
{
"BriefDescription": "Any Instruction Length Decoder stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.ANY",
"SampleAfterValue": "2000000",
@@ -284,6 +324,7 @@
},
{
"BriefDescription": "Instruction Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
@@ -291,6 +332,7 @@
},
{
"BriefDescription": "Length Change Prefix stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000000",
@@ -298,6 +340,7 @@
},
{
"BriefDescription": "Stall cycles due to BPU MRU bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.MRU",
"SampleAfterValue": "2000000",
@@ -305,6 +348,7 @@
},
{
"BriefDescription": "Regen stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.REGEN",
"SampleAfterValue": "2000000",
@@ -312,6 +356,7 @@
},
{
"BriefDescription": "Instructions that must be decoded by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "INST_DECODED.DEC0",
"SampleAfterValue": "2000000",
@@ -319,6 +364,7 @@
},
{
"BriefDescription": "Instructions written to instruction queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "INST_QUEUE_WRITES",
"SampleAfterValue": "2000000",
@@ -326,6 +372,7 @@
},
{
"BriefDescription": "Cycles instructions are written to the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "INST_QUEUE_WRITE_CYCLES",
"SampleAfterValue": "2000000",
@@ -333,11 +380,13 @@
},
{
"BriefDescription": "Instructions retired (fixed counter)",
+ "Counter": "Fixed counter 1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Instructions retired (Programmable counter and Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -346,6 +395,7 @@
},
{
"BriefDescription": "Retired MMX instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.MMX",
"PEBS": "1",
@@ -354,6 +404,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES",
@@ -364,6 +415,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
@@ -374,6 +426,7 @@
},
{
"BriefDescription": "Retired floating-point operations (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PEBS": "1",
@@ -382,6 +435,7 @@
},
{
"BriefDescription": "Load operations conflicting with software prefetches",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE",
"SampleAfterValue": "200000",
@@ -389,6 +443,7 @@
},
{
"BriefDescription": "Cycles when uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.ACTIVE",
@@ -397,6 +452,7 @@
},
{
"BriefDescription": "Cycles no uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.INACTIVE",
@@ -406,6 +462,7 @@
},
{
"BriefDescription": "Loops that can't stream from the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "LSD_OVERFLOW",
"SampleAfterValue": "2000000",
@@ -413,6 +470,7 @@
},
{
"BriefDescription": "Cycles machine clear asserted",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "20000",
@@ -420,6 +478,7 @@
},
{
"BriefDescription": "Execution pipeline restart due to Memory ordering conflicts",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEM_ORDER",
"SampleAfterValue": "20000",
@@ -427,6 +486,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20000",
@@ -434,6 +494,7 @@
},
{
"BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -441,6 +502,7 @@
},
{
"BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.FLAGS",
"SampleAfterValue": "2000000",
@@ -448,6 +510,7 @@
},
{
"BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.REGISTERS",
"SampleAfterValue": "2000000",
@@ -455,6 +518,7 @@
},
{
"BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ROB_READ_PORT",
"SampleAfterValue": "2000000",
@@ -462,6 +526,7 @@
},
{
"BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.SCOREBOARD",
"SampleAfterValue": "2000000",
@@ -469,6 +534,7 @@
},
{
"BriefDescription": "Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -476,6 +542,7 @@
},
{
"BriefDescription": "FPU control word write stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.FPCW",
"SampleAfterValue": "2000000",
@@ -483,6 +550,7 @@
},
{
"BriefDescription": "Load buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LOAD",
"SampleAfterValue": "2000000",
@@ -490,6 +558,7 @@
},
{
"BriefDescription": "MXCSR rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.MXCSR",
"SampleAfterValue": "2000000",
@@ -497,6 +566,7 @@
},
{
"BriefDescription": "Other Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.OTHER",
"SampleAfterValue": "2000000",
@@ -504,6 +574,7 @@
},
{
"BriefDescription": "ROB full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB_FULL",
"SampleAfterValue": "2000000",
@@ -511,6 +582,7 @@
},
{
"BriefDescription": "Reservation Station full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS_FULL",
"SampleAfterValue": "2000000",
@@ -518,6 +590,7 @@
},
{
"BriefDescription": "Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.STORE",
"SampleAfterValue": "2000000",
@@ -525,6 +598,7 @@
},
{
"BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
"PEBS": "1",
@@ -533,6 +607,7 @@
},
{
"BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
"PEBS": "1",
@@ -541,6 +616,7 @@
},
{
"BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
"PEBS": "1",
@@ -549,6 +625,7 @@
},
{
"BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
"PEBS": "1",
@@ -557,6 +634,7 @@
},
{
"BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
"PEBS": "1",
@@ -565,6 +643,7 @@
},
{
"BriefDescription": "Stack pointer instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_FOLDING",
"SampleAfterValue": "2000000",
@@ -572,6 +651,7 @@
},
{
"BriefDescription": "Stack pointer sync operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_SYNC",
"SampleAfterValue": "2000000",
@@ -579,6 +659,7 @@
},
{
"BriefDescription": "Uops decoded by Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
@@ -587,6 +668,7 @@
},
{
"BriefDescription": "Cycles no Uops are decoded",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.STALL_CYCLES",
@@ -597,6 +679,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
@@ -606,6 +689,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
@@ -615,6 +699,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -626,6 +711,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -637,6 +723,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
@@ -647,6 +734,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
@@ -656,6 +744,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT0",
"SampleAfterValue": "2000000",
@@ -663,6 +752,7 @@
},
{
"BriefDescription": "Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015",
"SampleAfterValue": "2000000",
@@ -670,6 +760,7 @@
},
{
"BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
@@ -679,6 +770,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT1",
"SampleAfterValue": "2000000",
@@ -687,6 +779,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops issued on ports 2, 3 or 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT234_CORE",
"SampleAfterValue": "2000000",
@@ -695,6 +788,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 2 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT2_CORE",
"SampleAfterValue": "2000000",
@@ -703,6 +797,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 3 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT3_CORE",
"SampleAfterValue": "2000000",
@@ -711,6 +806,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 4 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT4_CORE",
"SampleAfterValue": "2000000",
@@ -718,6 +814,7 @@
},
{
"BriefDescription": "Uops executed on port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT5",
"SampleAfterValue": "2000000",
@@ -725,6 +822,7 @@
},
{
"BriefDescription": "Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000000",
@@ -733,6 +831,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops were issued on any thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -743,6 +842,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops were issued on either thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
@@ -751,6 +851,7 @@
},
{
"BriefDescription": "Fused Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.FUSED",
"SampleAfterValue": "2000000",
@@ -758,6 +859,7 @@
},
{
"BriefDescription": "Cycles no Uops were issued",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -767,6 +869,7 @@
},
{
"BriefDescription": "Cycles Uops are being retired",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
@@ -776,6 +879,7 @@
},
{
"BriefDescription": "Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
"PEBS": "1",
@@ -784,6 +888,7 @@
},
{
"BriefDescription": "Macro-fused Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -792,6 +897,7 @@
},
{
"BriefDescription": "Retirement slots used (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -800,6 +906,7 @@
},
{
"BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -810,6 +917,7 @@
},
{
"BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
@@ -820,6 +928,7 @@
},
{
"BriefDescription": "Uop unfusions due to FP exceptions",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UOP_UNFUSION",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
index c434cd4ef4f1..e88c0802e679 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DTLB load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "DTLB load miss caused by low part of address",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "DTLB second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "DTLB load miss page walks complete",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "DTLB misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "DTLB first level misses but second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.STLB_HIT",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "DTLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "ITLB flushes",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB_FLUSH",
"SampleAfterValue": "2000000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "ITLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "ITLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Retired instructions that missed the ITLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "ITLB_MISS_RETIRED",
"PEBS": "1",
@@ -79,6 +90,7 @@
},
{
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
"PEBS": "1",
@@ -87,6 +99,7 @@
},
{
"BriefDescription": "Retired stores that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "MEM_STORE_RETIRED.DTLB_MISS",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
index 0042e53fdc78..2c0ea6f8c4e0 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles L1D locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles L1D and L2 locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D_L2",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1D cache lines replaced in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_EVICT",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1D cache lines allocated in the M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_REPL",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1D snoop eviction of cache lines in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_SNOOP_EVICT",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1 data cache lines allocated",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.REPL",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "All references to the L1 data cache",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "L1D_ALL_REF.ANY",
"SampleAfterValue": "2000000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "L1 data cacheable reads and writes",
+ "Counter": "0,1",
"EventCode": "0x43",
"EventName": "L1D_ALL_REF.CACHEABLE",
"SampleAfterValue": "2000000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "L1 data cache read in E state",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.E_STATE",
"SampleAfterValue": "2000000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "L1 data cache read in I state (misses)",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.I_STATE",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "L1 data cache reads",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.MESI",
"SampleAfterValue": "2000000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "L1 data cache read in M state",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.M_STATE",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "L1 data cache read in S state",
+ "Counter": "0,1",
"EventCode": "0x40",
"EventName": "L1D_CACHE_LD.S_STATE",
"SampleAfterValue": "2000000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "L1 data cache load locks in E state",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.E_STATE",
"SampleAfterValue": "2000000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "L1 data cache load lock hits",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.HIT",
"SampleAfterValue": "2000000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "L1 data cache load locks in M state",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.M_STATE",
"SampleAfterValue": "2000000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "L1 data cache load locks in S state",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.S_STATE",
"SampleAfterValue": "2000000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "L1D load lock accepted in fill buffer",
+ "Counter": "0,1",
"EventCode": "0x53",
"EventName": "L1D_CACHE_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "L1D prefetch load lock accepted in fill buffer",
+ "Counter": "0,1",
"EventCode": "0x52",
"EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "L1 data cache stores in E state",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "L1D_CACHE_ST.E_STATE",
"SampleAfterValue": "2000000",
@@ -141,6 +161,7 @@
},
{
"BriefDescription": "L1 data cache stores in M state",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "L1D_CACHE_ST.M_STATE",
"SampleAfterValue": "2000000",
@@ -148,6 +169,7 @@
},
{
"BriefDescription": "L1 data cache stores in S state",
+ "Counter": "0,1",
"EventCode": "0x41",
"EventName": "L1D_CACHE_ST.S_STATE",
"SampleAfterValue": "2000000",
@@ -155,6 +177,7 @@
},
{
"BriefDescription": "L1D hardware prefetch misses",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.MISS",
"SampleAfterValue": "200000",
@@ -162,6 +185,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.REQUESTS",
"SampleAfterValue": "200000",
@@ -169,6 +193,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests triggered",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.TRIGGERS",
"SampleAfterValue": "200000",
@@ -176,6 +201,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.E_STATE",
"SampleAfterValue": "100000",
@@ -183,6 +209,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.I_STATE",
"SampleAfterValue": "100000",
@@ -190,6 +217,7 @@
},
{
"BriefDescription": "All L1 writebacks to L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.MESI",
"SampleAfterValue": "100000",
@@ -197,6 +225,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.M_STATE",
"SampleAfterValue": "100000",
@@ -204,6 +233,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.S_STATE",
"SampleAfterValue": "100000",
@@ -211,6 +241,7 @@
},
{
"BriefDescription": "All L2 data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.ANY",
"SampleAfterValue": "200000",
@@ -218,6 +249,7 @@
},
{
"BriefDescription": "L2 data demand loads in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
"SampleAfterValue": "200000",
@@ -225,6 +257,7 @@
},
{
"BriefDescription": "L2 data demand loads in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
"SampleAfterValue": "200000",
@@ -232,6 +265,7 @@
},
{
"BriefDescription": "L2 data demand requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.MESI",
"SampleAfterValue": "200000",
@@ -239,6 +273,7 @@
},
{
"BriefDescription": "L2 data demand loads in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
"SampleAfterValue": "200000",
@@ -246,6 +281,7 @@
},
{
"BriefDescription": "L2 data demand loads in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
"SampleAfterValue": "200000",
@@ -253,6 +289,7 @@
},
{
"BriefDescription": "L2 data prefetches in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
@@ -260,6 +297,7 @@
},
{
"BriefDescription": "L2 data prefetches in the I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
@@ -267,6 +305,7 @@
},
{
"BriefDescription": "All L2 data prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
"SampleAfterValue": "200000",
@@ -274,6 +313,7 @@
},
{
"BriefDescription": "L2 data prefetches in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
@@ -281,6 +321,7 @@
},
{
"BriefDescription": "L2 data prefetches in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
@@ -288,6 +329,7 @@
},
{
"BriefDescription": "L2 lines allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
@@ -295,6 +337,7 @@
},
{
"BriefDescription": "L2 lines allocated in the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E_STATE",
"SampleAfterValue": "100000",
@@ -302,6 +345,7 @@
},
{
"BriefDescription": "L2 lines allocated in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S_STATE",
"SampleAfterValue": "100000",
@@ -309,6 +353,7 @@
},
{
"BriefDescription": "L2 lines evicted",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.ANY",
"SampleAfterValue": "100000",
@@ -316,6 +361,7 @@
},
{
"BriefDescription": "L2 lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100000",
@@ -323,6 +369,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100000",
@@ -330,6 +377,7 @@
},
{
"BriefDescription": "L2 lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
"SampleAfterValue": "100000",
@@ -337,6 +385,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
"SampleAfterValue": "100000",
@@ -344,6 +393,7 @@
},
{
"BriefDescription": "L2 instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCHES",
"SampleAfterValue": "200000",
@@ -351,6 +401,7 @@
},
{
"BriefDescription": "L2 instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_HIT",
"SampleAfterValue": "200000",
@@ -358,6 +409,7 @@
},
{
"BriefDescription": "L2 instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_MISS",
"SampleAfterValue": "200000",
@@ -365,6 +417,7 @@
},
{
"BriefDescription": "L2 load hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_HIT",
"SampleAfterValue": "200000",
@@ -372,6 +425,7 @@
},
{
"BriefDescription": "L2 load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_MISS",
"SampleAfterValue": "200000",
@@ -379,6 +433,7 @@
},
{
"BriefDescription": "L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LOADS",
"SampleAfterValue": "200000",
@@ -386,6 +441,7 @@
},
{
"BriefDescription": "All L2 misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200000",
@@ -393,6 +449,7 @@
},
{
"BriefDescription": "All L2 prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCHES",
"SampleAfterValue": "200000",
@@ -400,6 +457,7 @@
},
{
"BriefDescription": "L2 prefetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_HIT",
"SampleAfterValue": "200000",
@@ -407,6 +465,7 @@
},
{
"BriefDescription": "L2 prefetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_MISS",
"SampleAfterValue": "200000",
@@ -414,6 +473,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200000",
@@ -421,6 +481,7 @@
},
{
"BriefDescription": "L2 RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFOS",
"SampleAfterValue": "200000",
@@ -428,6 +489,7 @@
},
{
"BriefDescription": "L2 RFO hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200000",
@@ -435,6 +497,7 @@
},
{
"BriefDescription": "L2 RFO misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200000",
@@ -442,6 +505,7 @@
},
{
"BriefDescription": "All L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.ANY",
"SampleAfterValue": "200000",
@@ -449,6 +513,7 @@
},
{
"BriefDescription": "L2 fill transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.FILL",
"SampleAfterValue": "200000",
@@ -456,6 +521,7 @@
},
{
"BriefDescription": "L2 instruction fetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.IFETCH",
"SampleAfterValue": "200000",
@@ -463,6 +529,7 @@
},
{
"BriefDescription": "L1D writeback to L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.L1D_WB",
"SampleAfterValue": "200000",
@@ -470,6 +537,7 @@
},
{
"BriefDescription": "L2 Load transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.LOAD",
"SampleAfterValue": "200000",
@@ -477,6 +545,7 @@
},
{
"BriefDescription": "L2 prefetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.PREFETCH",
"SampleAfterValue": "200000",
@@ -484,6 +553,7 @@
},
{
"BriefDescription": "L2 RFO transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.RFO",
"SampleAfterValue": "200000",
@@ -491,6 +561,7 @@
},
{
"BriefDescription": "L2 writeback to LLC transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.WB",
"SampleAfterValue": "200000",
@@ -498,6 +569,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.E_STATE",
"SampleAfterValue": "100000",
@@ -505,6 +577,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.HIT",
"SampleAfterValue": "100000",
@@ -512,6 +585,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.I_STATE",
"SampleAfterValue": "100000",
@@ -519,6 +593,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.MESI",
"SampleAfterValue": "100000",
@@ -526,6 +601,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.M_STATE",
"SampleAfterValue": "100000",
@@ -533,6 +609,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.S_STATE",
"SampleAfterValue": "100000",
@@ -540,6 +617,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.HIT",
"SampleAfterValue": "100000",
@@ -547,6 +625,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.I_STATE",
"SampleAfterValue": "100000",
@@ -554,6 +633,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.MESI",
"SampleAfterValue": "100000",
@@ -561,6 +641,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.M_STATE",
"SampleAfterValue": "100000",
@@ -568,6 +649,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.S_STATE",
"SampleAfterValue": "100000",
@@ -575,6 +657,7 @@
},
{
"BriefDescription": "Longest latency cache miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100000",
@@ -582,6 +665,7 @@
},
{
"BriefDescription": "Longest latency cache reference",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "200000",
@@ -589,6 +673,7 @@
},
{
"BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
"MSRIndex": "0x3F6",
@@ -598,6 +683,7 @@
},
{
"BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
"MSRIndex": "0x3F6",
@@ -608,6 +694,7 @@
},
{
"BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
"MSRIndex": "0x3F6",
@@ -618,6 +705,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
"MSRIndex": "0x3F6",
@@ -628,6 +716,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
"MSRIndex": "0x3F6",
@@ -638,6 +727,7 @@
},
{
"BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
"MSRIndex": "0x3F6",
@@ -648,6 +738,7 @@
},
{
"BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
"MSRIndex": "0x3F6",
@@ -658,6 +749,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
"MSRIndex": "0x3F6",
@@ -668,6 +760,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
"MSRIndex": "0x3F6",
@@ -678,6 +771,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
"MSRIndex": "0x3F6",
@@ -688,6 +782,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
"MSRIndex": "0x3F6",
@@ -698,6 +793,7 @@
},
{
"BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
"MSRIndex": "0x3F6",
@@ -708,6 +804,7 @@
},
{
"BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
"MSRIndex": "0x3F6",
@@ -718,6 +815,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
"MSRIndex": "0x3F6",
@@ -728,6 +826,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
"MSRIndex": "0x3F6",
@@ -738,6 +837,7 @@
},
{
"BriefDescription": "Instructions retired which contains a load (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LOADS",
"PEBS": "1",
@@ -746,6 +846,7 @@
},
{
"BriefDescription": "Instructions retired which contains a store (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.STORES",
"PEBS": "1",
@@ -754,6 +855,7 @@
},
{
"BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -762,6 +864,7 @@
},
{
"BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L1D_HIT",
"PEBS": "1",
@@ -770,6 +873,7 @@
},
{
"BriefDescription": "Retired loads that hit the L2 cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
"PEBS": "1",
@@ -778,6 +882,7 @@
},
{
"BriefDescription": "Retired loads that miss the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_MISS",
"PEBS": "1",
@@ -786,6 +891,7 @@
},
{
"BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
"PEBS": "1",
@@ -794,6 +900,7 @@
},
{
"BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
"PEBS": "1",
@@ -802,6 +909,7 @@
},
{
"BriefDescription": "Offcore L1 data cache writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
"SampleAfterValue": "100000",
@@ -809,6 +917,7 @@
},
{
"BriefDescription": "Offcore requests blocked due to Super Queue full",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_SQ_FULL",
"SampleAfterValue": "100000",
@@ -816,6 +925,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -825,6 +935,7 @@
},
{
"BriefDescription": "All offcore data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -834,6 +945,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -843,6 +955,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -852,6 +965,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -861,6 +975,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -870,6 +985,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -879,6 +995,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -888,6 +1005,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -897,6 +1015,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -906,6 +1025,7 @@
},
{
"BriefDescription": "Offcore data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -915,6 +1035,7 @@
},
{
"BriefDescription": "Offcore data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -924,6 +1045,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -933,6 +1055,7 @@
},
{
"BriefDescription": "All offcore code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -942,6 +1065,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -951,6 +1075,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -960,6 +1085,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -969,6 +1095,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -978,6 +1105,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -987,6 +1115,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -996,6 +1125,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1005,6 +1135,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1014,6 +1145,7 @@
},
{
"BriefDescription": "Offcore code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1023,6 +1155,7 @@
},
{
"BriefDescription": "Offcore code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1032,6 +1165,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1041,6 +1175,7 @@
},
{
"BriefDescription": "All offcore requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1050,6 +1185,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1059,6 +1195,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1068,6 +1205,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1077,6 +1215,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1086,6 +1225,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1095,6 +1235,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1104,6 +1245,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1113,6 +1255,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1122,6 +1265,7 @@
},
{
"BriefDescription": "Offcore requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1131,6 +1275,7 @@
},
{
"BriefDescription": "Offcore requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1140,6 +1285,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1149,6 +1295,7 @@
},
{
"BriefDescription": "All offcore RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1158,6 +1305,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1167,6 +1315,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1176,6 +1325,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1185,6 +1335,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1194,6 +1345,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1203,6 +1355,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1212,6 +1365,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1221,6 +1375,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1230,6 +1385,7 @@
},
{
"BriefDescription": "Offcore RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1239,6 +1395,7 @@
},
{
"BriefDescription": "Offcore RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1248,6 +1405,7 @@
},
{
"BriefDescription": "Offcore writebacks to any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1257,6 +1415,7 @@
},
{
"BriefDescription": "All offcore writebacks",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1266,6 +1425,7 @@
},
{
"BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1275,6 +1435,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1284,6 +1445,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1293,6 +1455,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1302,6 +1465,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1311,6 +1475,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1320,6 +1485,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1329,6 +1495,7 @@
},
{
"BriefDescription": "Offcore writebacks that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1338,6 +1505,7 @@
},
{
"BriefDescription": "Offcore writebacks that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1347,6 +1515,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1356,6 +1525,7 @@
},
{
"BriefDescription": "All offcore code or data read requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1365,6 +1535,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1374,6 +1545,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1383,6 +1555,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1392,6 +1565,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1401,6 +1575,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1410,6 +1585,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1419,6 +1595,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1428,6 +1605,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1437,6 +1615,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1446,6 +1625,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1455,6 +1635,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any cache_dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1464,6 +1645,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any location",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1473,6 +1655,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1482,6 +1665,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1491,6 +1675,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1500,6 +1685,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1509,6 +1695,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = local cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1518,6 +1705,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = local cache or dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1527,6 +1715,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1536,6 +1725,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = remote cache or dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1545,6 +1735,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1554,6 +1745,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1563,6 +1755,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1572,6 +1765,7 @@
},
{
"BriefDescription": "All offcore demand data requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1581,6 +1775,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1590,6 +1785,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1599,6 +1795,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1608,6 +1805,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1617,6 +1815,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1626,6 +1825,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1635,6 +1835,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1644,6 +1845,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1653,6 +1855,7 @@
},
{
"BriefDescription": "Offcore demand data requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1662,6 +1865,7 @@
},
{
"BriefDescription": "Offcore demand data requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1671,6 +1875,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1680,6 +1885,7 @@
},
{
"BriefDescription": "All offcore demand data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1689,6 +1895,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1698,6 +1905,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1707,6 +1915,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1716,6 +1925,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1725,6 +1935,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1734,6 +1945,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1743,6 +1955,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1752,6 +1965,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1761,6 +1975,7 @@
},
{
"BriefDescription": "Offcore demand data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1770,6 +1985,7 @@
},
{
"BriefDescription": "Offcore demand data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1779,6 +1995,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1788,6 +2005,7 @@
},
{
"BriefDescription": "All offcore demand code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1797,6 +2015,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1806,6 +2025,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1815,6 +2035,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1824,6 +2045,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1833,6 +2055,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1842,6 +2065,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1851,6 +2075,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1860,6 +2085,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1869,6 +2095,7 @@
},
{
"BriefDescription": "Offcore demand code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1878,6 +2105,7 @@
},
{
"BriefDescription": "Offcore demand code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1887,6 +2115,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1896,6 +2125,7 @@
},
{
"BriefDescription": "All offcore demand RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1905,6 +2135,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1914,6 +2145,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1923,6 +2155,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1932,6 +2165,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1941,6 +2175,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1950,6 +2185,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1959,6 +2195,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1968,6 +2205,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1977,6 +2215,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1986,6 +2225,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1995,6 +2235,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2004,6 +2245,7 @@
},
{
"BriefDescription": "All offcore other requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2013,6 +2255,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2022,6 +2265,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2031,6 +2275,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2040,6 +2285,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2049,6 +2295,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2058,6 +2305,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2067,6 +2315,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2076,6 +2325,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2085,6 +2335,7 @@
},
{
"BriefDescription": "Offcore other requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2094,6 +2345,7 @@
},
{
"BriefDescription": "Offcore other requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2103,6 +2355,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2112,6 +2365,7 @@
},
{
"BriefDescription": "All offcore prefetch data requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2121,6 +2375,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2130,6 +2385,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2139,6 +2395,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2148,6 +2405,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2157,6 +2415,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2166,6 +2425,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2175,6 +2435,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2184,6 +2445,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2193,6 +2455,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2202,6 +2465,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2211,6 +2475,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2220,6 +2485,7 @@
},
{
"BriefDescription": "All offcore prefetch data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2229,6 +2495,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2238,6 +2505,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2247,6 +2515,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2256,6 +2525,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2265,6 +2535,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2274,6 +2545,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2283,6 +2555,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2292,6 +2565,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2301,6 +2575,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2310,6 +2585,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2319,6 +2595,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2328,6 +2605,7 @@
},
{
"BriefDescription": "All offcore prefetch code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2337,6 +2615,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2346,6 +2625,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2355,6 +2635,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2364,6 +2645,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2373,6 +2655,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2382,6 +2665,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2391,6 +2675,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2400,6 +2685,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2409,6 +2695,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2418,6 +2705,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2427,6 +2715,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2436,6 +2725,7 @@
},
{
"BriefDescription": "All offcore prefetch RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2445,6 +2735,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2454,6 +2745,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2463,6 +2755,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2472,6 +2765,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2481,6 +2775,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2490,6 +2785,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2499,6 +2795,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2508,6 +2805,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2517,6 +2815,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2526,6 +2825,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2535,6 +2835,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2544,6 +2845,7 @@
},
{
"BriefDescription": "All offcore prefetch requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2553,6 +2855,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2562,6 +2865,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2571,6 +2875,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2580,6 +2885,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2589,6 +2895,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2598,6 +2905,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2607,6 +2915,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2616,6 +2925,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2625,6 +2935,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2634,6 +2945,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2643,6 +2955,7 @@
},
{
"BriefDescription": "Super Queue lock splits across a cache line",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "2000000",
@@ -2650,6 +2963,7 @@
},
{
"BriefDescription": "Loads delayed with at-Retirement block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.AT_RET",
"SampleAfterValue": "200000",
@@ -2657,6 +2971,7 @@
},
{
"BriefDescription": "Cacheable loads delayed with L1D block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.L1D_BLOCK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/counter.json b/tools/perf/pmu-events/arch/x86/nehalemex/counter.json
new file mode 100644
index 000000000000..ecf0795dceab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
index 196ae1d9b157..9bac9313b65c 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "X87 Floating point assists (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.ALL",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.OUTPUT",
"PEBS": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "MMX Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.MMX",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "SSE2 integer Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "SSE* FP double precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "SSE and SSE2 FP Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "SSE FP packed Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "SSE FP scalar Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "SSE* FP single precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Computational floating-point operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "All Floating Point to and from MMX transitions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.ANY",
"SampleAfterValue": "2000000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Transitions from MMX to Floating Point instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_FP",
"SampleAfterValue": "2000000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Transitions from Floating Point to MMX instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_MMX",
"SampleAfterValue": "2000000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "128 bit SIMD integer pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACK",
"SampleAfterValue": "200000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "128 bit SIMD integer arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "128 bit SIMD integer logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "128 bit SIMD integer multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "128 bit SIMD integer unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.UNPACK",
"SampleAfterValue": "200000",
@@ -151,6 +172,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACK",
"SampleAfterValue": "200000",
@@ -158,6 +180,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -165,6 +188,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -172,6 +196,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit packed multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -179,6 +204,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -186,6 +212,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -193,6 +220,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.UNPACK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json b/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
index f7f28510e3ae..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MACRO_INSTS.DECODED",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Macro-fused instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "MACRO_INSTS.FUSIONS_DECODED",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Two Uop instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "TWO_UOP_INSTS_DECODED",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
index f810880a295e..dc732c8baf12 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Offcore data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Offcore data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Offcore code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -73,6 +81,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -82,6 +91,7 @@
},
{
"BriefDescription": "Offcore requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -91,6 +101,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -109,6 +121,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -118,6 +131,7 @@
},
{
"BriefDescription": "Offcore RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -127,6 +141,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -136,6 +151,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -145,6 +161,7 @@
},
{
"BriefDescription": "Offcore writebacks to any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -154,6 +171,7 @@
},
{
"BriefDescription": "Offcore writebacks that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -163,6 +181,7 @@
},
{
"BriefDescription": "Offcore writebacks to the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -172,6 +191,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -181,6 +201,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -190,6 +211,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -199,6 +221,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -208,6 +231,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -217,6 +241,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -226,6 +251,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any LLC miss",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -235,6 +261,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -244,6 +271,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -253,6 +281,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -262,6 +291,7 @@
},
{
"BriefDescription": "Offcore demand data requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -271,6 +301,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -280,6 +311,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -289,6 +321,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -298,6 +331,7 @@
},
{
"BriefDescription": "Offcore demand data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -307,6 +341,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -316,6 +351,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -325,6 +361,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -334,6 +371,7 @@
},
{
"BriefDescription": "Offcore demand code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -343,6 +381,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -352,6 +391,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -361,6 +401,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -370,6 +411,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -379,6 +421,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -388,6 +431,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -397,6 +441,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -406,6 +451,7 @@
},
{
"BriefDescription": "Offcore other requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -415,6 +461,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -424,6 +471,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -433,6 +481,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -442,6 +491,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -451,6 +501,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -460,6 +511,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -469,6 +521,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -478,6 +531,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -487,6 +541,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -496,6 +551,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -505,6 +561,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -514,6 +571,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -523,6 +581,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -532,6 +591,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -541,6 +601,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -550,6 +611,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -559,6 +621,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -568,6 +631,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -577,6 +641,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -586,6 +651,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -595,6 +661,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/other.json b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
index fb706cb51832..f6887b234b0e 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/other.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ES segment renames",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "ES_REG_RENAMES",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "I/O transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "IO_TRANSACTIONS",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.CYCLES_STALLED",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.HITS",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.MISSES",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.READS",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "LARGE_ITLB.HIT",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "All loads dispatched",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.ANY",
"SampleAfterValue": "2000000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "Loads dispatched from the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.MOB",
"SampleAfterValue": "2000000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "Loads dispatched that bypass the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Loads dispatched from stage 305",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS_DELAYED",
"SampleAfterValue": "2000000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "False dependencies due to partial address aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "PARTIAL_ADDRESS_ALIAS",
"SampleAfterValue": "200000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "All Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "SB_DRAIN.ANY",
"SampleAfterValue": "200000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "Segment rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "SEG_RENAME_STALLS",
"SampleAfterValue": "2000000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "Thread responded HIT to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HIT",
"SampleAfterValue": "100000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "Thread responded HITE to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITE",
"SampleAfterValue": "100000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "Thread responded HITM to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITM",
"SampleAfterValue": "100000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "Super Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xF6",
"EventName": "SQ_FULL_STALL_CYCLES",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json b/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
index c45f2ffa861e..869c84fa7c60 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles the divider is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Divide Operations executed",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Multiply operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.MUL",
"SampleAfterValue": "2000000",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "BACLEAR asserted with bad target address",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.BAD_TARGET",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "BACLEAR asserted, regardless of cause",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.CLEAR",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Instruction queue forced BACLEAR",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "BACLEAR_FORCE_IQ",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Early Branch Prediciton Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.EARLY",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.LATE",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "BPU_MISSED_CALL_RET",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Branch instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ANY",
"SampleAfterValue": "200000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "Conditional branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.COND",
"SampleAfterValue": "200000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT",
"SampleAfterValue": "200000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Unconditional call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "Indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "Indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "20000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "Call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NEAR_CALLS",
"SampleAfterValue": "20000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "All non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NON_CALLS",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "Indirect return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.RETURN_NEAR",
"SampleAfterValue": "20000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "Taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "Retired branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -152,6 +173,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -160,6 +182,7 @@
},
{
"BriefDescription": "Retired near call instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -168,6 +191,7 @@
},
{
"BriefDescription": "Mispredicted branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ANY",
"SampleAfterValue": "20000",
@@ -175,6 +199,7 @@
},
{
"BriefDescription": "Mispredicted conditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.COND",
"SampleAfterValue": "20000",
@@ -182,6 +207,7 @@
},
{
"BriefDescription": "Mispredicted unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT",
"SampleAfterValue": "20000",
@@ -189,6 +215,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -196,6 +223,7 @@
},
{
"BriefDescription": "Mispredicted indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -203,6 +231,7 @@
},
{
"BriefDescription": "Mispredicted indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "2000",
@@ -210,6 +239,7 @@
},
{
"BriefDescription": "Mispredicted call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NEAR_CALLS",
"SampleAfterValue": "2000",
@@ -217,6 +247,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NON_CALLS",
"SampleAfterValue": "20000",
@@ -224,6 +255,7 @@
},
{
"BriefDescription": "Mispredicted return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.RETURN_NEAR",
"SampleAfterValue": "2000",
@@ -231,6 +263,7 @@
},
{
"BriefDescription": "Mispredicted taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN",
"SampleAfterValue": "20000",
@@ -238,6 +271,7 @@
},
{
"BriefDescription": "Mispredicted near retired calls (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -246,11 +280,13 @@
},
{
"BriefDescription": "Reference cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 3",
"EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_P",
"SampleAfterValue": "100000",
@@ -258,17 +294,20 @@
},
{
"BriefDescription": "Cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Total CPU cycles",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
@@ -277,6 +316,7 @@
},
{
"BriefDescription": "Any Instruction Length Decoder stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.ANY",
"SampleAfterValue": "2000000",
@@ -284,6 +324,7 @@
},
{
"BriefDescription": "Instruction Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
@@ -291,6 +332,7 @@
},
{
"BriefDescription": "Length Change Prefix stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000000",
@@ -298,6 +340,7 @@
},
{
"BriefDescription": "Stall cycles due to BPU MRU bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.MRU",
"SampleAfterValue": "2000000",
@@ -305,6 +348,7 @@
},
{
"BriefDescription": "Regen stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.REGEN",
"SampleAfterValue": "2000000",
@@ -312,6 +356,7 @@
},
{
"BriefDescription": "Instructions that must be decoded by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "INST_DECODED.DEC0",
"SampleAfterValue": "2000000",
@@ -319,6 +364,7 @@
},
{
"BriefDescription": "Instructions written to instruction queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "INST_QUEUE_WRITES",
"SampleAfterValue": "2000000",
@@ -326,6 +372,7 @@
},
{
"BriefDescription": "Cycles instructions are written to the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "INST_QUEUE_WRITE_CYCLES",
"SampleAfterValue": "2000000",
@@ -333,11 +380,13 @@
},
{
"BriefDescription": "Instructions retired (fixed counter)",
+ "Counter": "Fixed counter 1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Instructions retired (Programmable counter and Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -346,6 +395,7 @@
},
{
"BriefDescription": "Retired MMX instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.MMX",
"PEBS": "1",
@@ -354,6 +404,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES",
@@ -364,6 +415,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
@@ -374,6 +426,7 @@
},
{
"BriefDescription": "Retired floating-point operations (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PEBS": "1",
@@ -382,6 +435,7 @@
},
{
"BriefDescription": "Load operations conflicting with software prefetches",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE",
"SampleAfterValue": "200000",
@@ -389,6 +443,7 @@
},
{
"BriefDescription": "Cycles when uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.ACTIVE",
@@ -397,6 +452,7 @@
},
{
"BriefDescription": "Cycles no uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.INACTIVE",
@@ -406,6 +462,7 @@
},
{
"BriefDescription": "Loops that can't stream from the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "LSD_OVERFLOW",
"SampleAfterValue": "2000000",
@@ -413,6 +470,7 @@
},
{
"BriefDescription": "Cycles machine clear asserted",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "20000",
@@ -420,6 +478,7 @@
},
{
"BriefDescription": "Execution pipeline restart due to Memory ordering conflicts",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEM_ORDER",
"SampleAfterValue": "20000",
@@ -427,6 +486,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20000",
@@ -434,6 +494,7 @@
},
{
"BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -441,6 +502,7 @@
},
{
"BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.FLAGS",
"SampleAfterValue": "2000000",
@@ -448,6 +510,7 @@
},
{
"BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.REGISTERS",
"SampleAfterValue": "2000000",
@@ -455,6 +518,7 @@
},
{
"BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ROB_READ_PORT",
"SampleAfterValue": "2000000",
@@ -462,6 +526,7 @@
},
{
"BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.SCOREBOARD",
"SampleAfterValue": "2000000",
@@ -469,6 +534,7 @@
},
{
"BriefDescription": "Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -476,6 +542,7 @@
},
{
"BriefDescription": "FPU control word write stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.FPCW",
"SampleAfterValue": "2000000",
@@ -483,6 +550,7 @@
},
{
"BriefDescription": "Load buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LOAD",
"SampleAfterValue": "2000000",
@@ -490,6 +558,7 @@
},
{
"BriefDescription": "MXCSR rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.MXCSR",
"SampleAfterValue": "2000000",
@@ -497,6 +566,7 @@
},
{
"BriefDescription": "Other Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.OTHER",
"SampleAfterValue": "2000000",
@@ -504,6 +574,7 @@
},
{
"BriefDescription": "ROB full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB_FULL",
"SampleAfterValue": "2000000",
@@ -511,6 +582,7 @@
},
{
"BriefDescription": "Reservation Station full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS_FULL",
"SampleAfterValue": "2000000",
@@ -518,6 +590,7 @@
},
{
"BriefDescription": "Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.STORE",
"SampleAfterValue": "2000000",
@@ -525,6 +598,7 @@
},
{
"BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
"PEBS": "1",
@@ -533,6 +607,7 @@
},
{
"BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
"PEBS": "1",
@@ -541,6 +616,7 @@
},
{
"BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
"PEBS": "1",
@@ -549,6 +625,7 @@
},
{
"BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
"PEBS": "1",
@@ -557,6 +634,7 @@
},
{
"BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
"PEBS": "1",
@@ -565,6 +643,7 @@
},
{
"BriefDescription": "Stack pointer instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_FOLDING",
"SampleAfterValue": "2000000",
@@ -572,6 +651,7 @@
},
{
"BriefDescription": "Stack pointer sync operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_SYNC",
"SampleAfterValue": "2000000",
@@ -579,6 +659,7 @@
},
{
"BriefDescription": "Uops decoded by Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
@@ -587,6 +668,7 @@
},
{
"BriefDescription": "Cycles no Uops are decoded",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.STALL_CYCLES",
@@ -597,6 +679,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
@@ -606,6 +689,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
@@ -615,6 +699,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -626,6 +711,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -637,6 +723,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
@@ -647,6 +734,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
@@ -656,6 +744,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT0",
"SampleAfterValue": "2000000",
@@ -663,6 +752,7 @@
},
{
"BriefDescription": "Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015",
"SampleAfterValue": "2000000",
@@ -670,6 +760,7 @@
},
{
"BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
@@ -679,6 +770,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT1",
"SampleAfterValue": "2000000",
@@ -687,6 +779,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops issued on ports 2, 3 or 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT234_CORE",
"SampleAfterValue": "2000000",
@@ -695,6 +788,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 2 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT2_CORE",
"SampleAfterValue": "2000000",
@@ -703,6 +797,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 3 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT3_CORE",
"SampleAfterValue": "2000000",
@@ -711,6 +806,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 4 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT4_CORE",
"SampleAfterValue": "2000000",
@@ -718,6 +814,7 @@
},
{
"BriefDescription": "Uops executed on port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT5",
"SampleAfterValue": "2000000",
@@ -725,6 +822,7 @@
},
{
"BriefDescription": "Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000000",
@@ -733,6 +831,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops were issued on any thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -743,6 +842,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops were issued on either thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
@@ -751,6 +851,7 @@
},
{
"BriefDescription": "Fused Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.FUSED",
"SampleAfterValue": "2000000",
@@ -758,6 +859,7 @@
},
{
"BriefDescription": "Cycles no Uops were issued",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -767,6 +869,7 @@
},
{
"BriefDescription": "Cycles Uops are being retired",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
@@ -776,6 +879,7 @@
},
{
"BriefDescription": "Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
"PEBS": "1",
@@ -784,6 +888,7 @@
},
{
"BriefDescription": "Macro-fused Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -792,6 +897,7 @@
},
{
"BriefDescription": "Retirement slots used (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -800,6 +906,7 @@
},
{
"BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -810,6 +917,7 @@
},
{
"BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
@@ -820,6 +928,7 @@
},
{
"BriefDescription": "Uop unfusions due to FP exceptions",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UOP_UNFUSION",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
index c434cd4ef4f1..e88c0802e679 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DTLB load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "DTLB load miss caused by low part of address",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "DTLB second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "DTLB load miss page walks complete",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "DTLB misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "DTLB first level misses but second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.STLB_HIT",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "DTLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "ITLB flushes",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB_FLUSH",
"SampleAfterValue": "2000000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "ITLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "ITLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Retired instructions that missed the ITLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "ITLB_MISS_RETIRED",
"PEBS": "1",
@@ -79,6 +90,7 @@
},
{
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
"PEBS": "1",
@@ -87,6 +99,7 @@
},
{
"BriefDescription": "Retired stores that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "MEM_STORE_RETIRED.DTLB_MISS",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/cache.json b/tools/perf/pmu-events/arch/x86/rocketlake/cache.json
index b0f54a6650fe..2e93b7835b41 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x48",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.SILENT",
"PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3",
"EventCode": "0xf2",
"EventName": "L2_LINES_OUT.USELESS_HWPF",
"PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -92,6 +103,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -100,6 +112,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Counts demand requests that miss L2 cache.",
@@ -108,6 +121,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PublicDescription": "Counts demand requests to L2 cache.",
@@ -116,6 +130,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "Counts all requests that miss L2 cache.",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "Counts all L2 requests.",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -180,6 +202,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -188,6 +211,7 @@
},
{
"BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_HIT",
"PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -196,6 +220,7 @@
},
{
"BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_MISS",
"PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -204,6 +229,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "Counts L2 writebacks that access L2 cache.",
@@ -212,6 +238,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -220,6 +247,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -230,6 +258,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -250,6 +280,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -260,6 +291,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -270,6 +302,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -280,6 +313,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -290,6 +324,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -300,6 +335,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
@@ -310,6 +346,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
@@ -320,6 +357,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -330,6 +368,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -340,6 +379,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or Bus Lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -350,6 +390,7 @@
},
{
"BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -360,6 +401,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -370,6 +412,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -380,6 +423,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -390,6 +434,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -400,6 +445,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -410,6 +456,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -420,6 +467,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -429,6 +477,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -438,6 +487,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -447,6 +497,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -456,6 +507,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -465,6 +517,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -474,6 +527,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -483,6 +537,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -492,6 +547,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -501,6 +557,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -510,6 +567,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -519,6 +577,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -528,6 +587,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -537,6 +597,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -546,6 +607,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -555,6 +617,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -564,6 +627,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -573,6 +637,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -582,6 +647,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -591,6 +657,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -600,6 +667,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -609,6 +677,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -618,6 +687,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -627,6 +697,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -636,6 +707,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -645,6 +717,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -654,6 +727,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -663,6 +737,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -672,6 +747,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -681,6 +757,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -690,6 +767,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -699,6 +777,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -708,6 +787,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -717,6 +797,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L3.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -726,6 +807,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -735,6 +817,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -744,6 +827,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -753,6 +837,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT.SNOOP_SENT",
"MSRIndex": "0x1a6,0x1a7",
@@ -762,6 +847,7 @@
},
{
"BriefDescription": "Counts streaming stores that hit a cacheline in the L3 where a snoop was sent or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.L3_HIT.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -771,6 +857,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -779,6 +866,7 @@
},
{
"BriefDescription": "Counts memory transactions sent to the uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions sent to the uncore including requests initiated by the core, all L3 prefetches, reads resulting from page walks, and snoop responses.",
@@ -787,6 +875,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -795,6 +884,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -803,6 +893,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of outstanding data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding data read requests pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -811,6 +902,7 @@
},
{
"BriefDescription": "Cycles where at least 1 outstanding data read request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -820,6 +912,7 @@
},
{
"BriefDescription": "Cycles where at least 1 outstanding Demand RFO request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -829,6 +922,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -837,6 +931,7 @@
},
{
"BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
@@ -845,6 +940,7 @@
},
{
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.BUS_LOCK",
"PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
@@ -853,6 +949,7 @@
},
{
"BriefDescription": "Cycles the queue waiting for offcore responses is full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SQ_FULL",
"PublicDescription": "Counts the cycles for which the thread is active and the queue waiting for responses from the uncore cannot take any more entries.",
@@ -860,7 +957,16 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
@@ -869,6 +975,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
@@ -877,6 +984,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T0",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
@@ -885,6 +993,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/counter.json b/tools/perf/pmu-events/arch/x86/rocketlake/counter.json
new file mode 100644
index 000000000000..5a350072522a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/counter.json
@@ -0,0 +1,17 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "ARB",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "CLOCK",
+ "CountersNumFixed": 1,
+ "CountersNumGeneric": "0"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json b/tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json
index 85c26c889088..61ddce0c8db6 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.FP",
"PublicDescription": "Counts all microcode Floating Point assists.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/frontend.json b/tools/perf/pmu-events/arch/x86/rocketlake/frontend.json
index 2b539a08d2bf..e7c7d4d4152d 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xab",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -57,6 +63,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -68,6 +75,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -79,6 +87,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -90,6 +99,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -101,6 +111,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -112,6 +123,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -123,6 +135,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -134,6 +147,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -145,6 +159,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -156,6 +171,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -167,6 +183,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -178,6 +195,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -189,6 +207,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -200,6 +219,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -211,6 +231,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -222,6 +243,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_DATA.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_DATA.STALLS]",
@@ -230,6 +252,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_HIT",
"PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
@@ -238,6 +261,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_MISS",
"PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
@@ -246,6 +270,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
@@ -254,6 +279,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_DATA.STALLS",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_16B.IFDATA_STALL]",
@@ -262,6 +288,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
@@ -270,6 +297,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -279,15 +307,17 @@
},
{
"BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
@@ -296,6 +326,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_ANY",
@@ -305,6 +336,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_OK",
@@ -314,6 +346,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -322,6 +355,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES_ANY",
@@ -331,6 +365,7 @@
},
{
"BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -341,6 +376,7 @@
},
{
"BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
@@ -349,6 +385,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
@@ -357,6 +394,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -366,6 +404,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/memory.json b/tools/perf/pmu-events/arch/x86/rocketlake/memory.json
index f84763220549..f73035f44330 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
"PublicDescription": "Counts the number of times HLE abort was triggered.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MEM",
"PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Number of times an HLE execution successfully committed",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.COMMIT",
"PublicDescription": "Counts the number of times HLE commit succeeded.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "HLE_RETIRED.START",
"PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -85,6 +95,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -97,6 +108,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -109,6 +121,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -121,6 +134,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -133,6 +147,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -145,6 +160,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -157,6 +173,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -169,6 +186,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -178,6 +196,7 @@
},
{
"BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -187,6 +206,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -196,6 +216,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -205,6 +226,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -214,6 +236,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -223,6 +246,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -232,6 +256,7 @@
},
{
"BriefDescription": "Counts streaming stores that was not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -241,6 +266,7 @@
},
{
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -248,6 +274,7 @@
},
{
"BriefDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
@@ -257,6 +284,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "1",
@@ -266,6 +294,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -274,6 +303,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -282,6 +312,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
@@ -290,6 +321,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -298,6 +330,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Counts the number of times RTM commit succeeded.",
@@ -306,6 +339,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
@@ -314,6 +348,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -322,6 +357,7 @@
},
{
"BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -330,6 +366,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_READ",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
@@ -338,6 +375,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
@@ -346,6 +384,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
@@ -354,6 +393,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
@@ -362,6 +402,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
@@ -370,6 +411,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
@@ -378,6 +420,7 @@
},
{
"BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
@@ -386,6 +429,7 @@
},
{
"BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json b/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json
index 5452a1448ded..3a88260194d1 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json
@@ -5,7 +5,20 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/other.json b/tools/perf/pmu-events/arch/x86/rocketlake/other.json
index 4fdc87339555..a96b2a989d3f 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
"PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
"PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -61,6 +68,7 @@
},
{
"BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -70,6 +78,7 @@
},
{
"BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -79,6 +88,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -88,6 +98,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -97,6 +108,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -115,6 +128,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -142,6 +158,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -151,6 +168,7 @@
},
{
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -160,6 +178,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -169,6 +188,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -178,6 +198,7 @@
},
{
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -187,6 +208,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -196,6 +218,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -205,6 +228,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -214,6 +238,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -223,6 +248,7 @@
},
{
"BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -232,6 +258,7 @@
},
{
"BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json b/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json
index c7313fd4fdf4..4fdf07c7beb7 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_ACTIVE",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.ANY",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -115,6 +128,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -142,6 +158,7 @@
},
{
"BriefDescription": "Mispredicted indirect CALL instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -151,6 +168,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -160,6 +178,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -169,6 +188,7 @@
},
{
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
@@ -185,6 +206,7 @@
},
{
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -193,6 +215,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -200,6 +223,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
@@ -208,6 +232,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -215,6 +240,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -238,6 +266,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "16",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -246,6 +275,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -254,6 +284,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -262,6 +293,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "20",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -270,6 +302,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -278,6 +311,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -286,6 +320,7 @@
},
{
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -302,6 +338,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -310,6 +347,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
@@ -319,6 +357,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
@@ -327,6 +366,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -335,6 +375,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -343,6 +384,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -351,6 +393,7 @@
},
{
"BriefDescription": "Number of all retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBS": "1",
@@ -359,6 +402,7 @@
},
{
"BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
@@ -367,6 +411,7 @@
},
{
"BriefDescription": "Cycles without actually retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.STALL_CYCLES",
@@ -377,6 +422,7 @@
},
{
"BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
@@ -386,6 +432,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -396,6 +443,7 @@
},
{
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
@@ -404,6 +452,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -412,6 +461,7 @@
},
{
"BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.UOP_DROPPING",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
@@ -420,6 +470,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -428,6 +479,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -436,6 +488,7 @@
},
{
"BriefDescription": "False dependencies due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies due to partial compare on address.",
@@ -444,6 +497,7 @@
},
{
"BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -452,6 +506,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -461,6 +516,7 @@
},
{
"BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
@@ -470,6 +526,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -478,6 +535,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -488,6 +546,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -496,6 +555,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR to be enabled properly.",
@@ -504,6 +564,7 @@
},
{
"BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.PAUSE_INST",
"PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
@@ -512,6 +573,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -520,6 +582,7 @@
},
{
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"SampleAfterValue": "100003",
@@ -527,6 +590,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5e",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
@@ -535,6 +599,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -546,6 +611,7 @@
},
{
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
@@ -554,6 +620,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -561,6 +628,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -569,6 +637,7 @@
},
{
"BriefDescription": "Number of uops decoded out of instructions exclusively fetched by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UOPS_DECODED.DEC0",
"PublicDescription": "Uops exclusively fetched by decoder 0",
@@ -577,6 +646,7 @@
},
{
"BriefDescription": "Number of uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_0",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
@@ -585,6 +655,7 @@
},
{
"BriefDescription": "Number of uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_1",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
@@ -593,6 +664,7 @@
},
{
"BriefDescription": "Number of uops executed on port 2 and 3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_2_3",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
@@ -601,6 +673,7 @@
},
{
"BriefDescription": "Number of uops executed on port 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
@@ -609,6 +682,7 @@
},
{
"BriefDescription": "Number of uops executed on port 5",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_5",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
@@ -617,6 +691,7 @@
},
{
"BriefDescription": "Number of uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_6",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
@@ -625,6 +700,7 @@
},
{
"BriefDescription": "Number of uops executed on port 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
@@ -633,6 +709,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Counts the number of uops executed from any thread.",
@@ -641,6 +718,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -650,6 +728,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -659,6 +738,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -668,6 +748,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -677,6 +758,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
@@ -686,6 +768,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
@@ -695,6 +778,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
@@ -704,6 +788,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
@@ -713,6 +798,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -723,6 +809,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
@@ -730,6 +817,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -738,6 +826,7 @@
},
{
"BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -746,6 +835,7 @@
},
{
"BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -756,6 +846,7 @@
},
{
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to 'Mixing Intel AVX and Intel SSE Code' section of the Optimization Guide.",
@@ -764,6 +855,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "Counts the retirement slots used each cycle.",
@@ -772,6 +864,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -782,6 +875,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "10",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
index 1dad462e58b1..13474af97786 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
@@ -104,7 +104,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
@@ -114,7 +114,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -135,7 +135,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
"MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_branch_instructions",
"MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6",
"ScaleUnit": "100%"
@@ -143,7 +143,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -181,7 +181,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(29 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -201,7 +201,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -219,7 +219,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -250,13 +250,13 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -265,7 +265,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -274,7 +274,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "32.5 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -283,7 +283,7 @@
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -296,7 +296,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -338,7 +338,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -347,7 +347,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -385,7 +385,7 @@
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_thread_slots",
- "MetricGroup": "Default;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -405,7 +405,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -462,6 +462,27 @@
},
{
"BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "tma_info_botlnk_l0_core_bound_likely",
+ "MetricGroup": "Cor;Metric;SMT",
+ "MetricName": "tma_info_botlnk_core_bound_likely",
+ "MetricThreshold": "tma_info_botlnk_core_bound_likely > 0.5"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd))",
+ "MetricGroup": "DSBmiss;Fed;Scaled_Slots;tma_issueFB",
+ "MetricName": "tma_info_botlnk_dsb_misses",
+ "MetricThreshold": "tma_info_botlnk_dsb_misses > 10"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;Scaled_Slots;tma_issueFL",
+ "MetricName": "tma_info_botlnk_ic_misses",
+ "MetricThreshold": "tma_info_botlnk_ic_misses > 5"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
"MetricGroup": "Cor;SMT",
@@ -469,13 +490,21 @@
"MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
@@ -487,39 +516,33 @@
"PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
- "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5"
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
@@ -527,23 +550,23 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
@@ -551,8 +574,8 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
@@ -560,7 +583,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
@@ -569,18 +592,25 @@
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -638,7 +668,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -655,7 +685,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 5 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
@@ -721,7 +751,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -816,12 +846,24 @@
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 11",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
+ },
+ {
+ "BriefDescription": "\"Bus lock\" per kilo instruction",
+ "MetricExpr": "tma_info_memory_mix_bus_lock_pki",
+ "MetricGroup": "Mem;Metric",
+ "MetricName": "tma_info_memory_bus_lock_pki"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki",
+ "MetricGroup": "Fed;MemoryTLB;Metric",
+ "MetricName": "tma_info_memory_code_stlb_mpki"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -848,18 +890,30 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "tma_info_memory_latency_data_l2_mlp",
+ "MetricGroup": "Memory_BW;Metric;Offcore",
+ "MetricName": "tma_info_memory_data_l2_mlp"
+ },
+ {
"BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_fb_hpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l1d_cache_fill_bw",
+ "MetricGroup": "Core_Metric;Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t"
+ },
+ {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -872,12 +926,18 @@
"MetricName": "tma_info_memory_l1mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l2_cache_fill_bw",
+ "MetricGroup": "Core_Metric;Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l2_cache_fill_bw_2t"
+ },
+ {
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -908,18 +968,36 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * L2_RQSTS.RFO_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
"MetricName": "tma_info_memory_l3_cache_access_bw"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l3_cache_access_bw",
+ "MetricGroup": "Core_Metric;Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_memory_l3_cache_access_bw_2t"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l3_cache_fill_bw",
+ "MetricGroup": "Core_Metric;Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l3_cache_fill_bw_2t"
+ },
+ {
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -933,7 +1011,7 @@
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricExpr": "tma_info_memory_load_l2_miss_latency",
"MetricGroup": "Memory_Lat;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
@@ -950,12 +1028,36 @@
"MetricName": "tma_info_memory_latency_load_l3_miss_latency"
},
{
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Clocks_Latency;Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricGroup": "Memory_BW;Metric;Offcore",
+ "MetricName": "tma_info_memory_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x0@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Clocks_Latency;Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_load_l3_miss_latency"
+ },
+ {
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
"MetricName": "tma_info_memory_load_miss_real_latency"
},
{
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "tma_info_memory_tlb_load_stlb_mpki",
+ "MetricGroup": "Mem;MemoryTLB;Metric",
+ "MetricName": "tma_info_memory_load_stlb_mpki"
+ },
+ {
"BriefDescription": "\"Bus lock\" per kilo instruction",
"MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -963,7 +1065,7 @@
},
{
"BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
+ "MetricExpr": "tma_info_memory_uc_load_pki",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_mix_uc_load_pki"
},
@@ -975,6 +1077,19 @@
"PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "tma_info_memory_tlb_page_walks_utilization",
+ "MetricGroup": "Core_Metric;Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_page_walks_utilization",
+ "MetricThreshold": "tma_info_memory_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki",
+ "MetricGroup": "Mem;MemoryTLB;Metric",
+ "MetricName": "tma_info_memory_store_stlb_mpki"
+ },
+ {
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
"MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
"MetricGroup": "Fed;MemoryTLB",
@@ -1000,12 +1115,36 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Un-cacheable retired load per kilo instruction",
+ "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;Metric",
+ "MetricName": "tma_info_memory_uc_load_pki"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from LSD per cycle",
+ "MetricExpr": "LSD.UOPS / LSD.CYCLES_ACTIVE",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_lsd"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
@@ -1027,13 +1166,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1171,7 +1310,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1180,7 +1319,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -1196,10 +1335,19 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -1218,7 +1366,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricExpr": "9 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -1230,7 +1378,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
@@ -1275,7 +1423,7 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
@@ -1290,7 +1438,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -1300,7 +1448,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1309,7 +1457,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -1346,7 +1494,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -1390,7 +1538,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -1409,7 +1557,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1417,7 +1565,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1469,7 +1617,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
+ "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -1497,7 +1645,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
@@ -1507,7 +1655,7 @@
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -1517,7 +1665,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -1554,7 +1702,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1582,7 +1730,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -1625,7 +1773,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json
index 8027590f1776..3946d4e01a8c 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.",
+ "Counter": "1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -8,55 +9,68 @@
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "Each cycle counts number of any coherent requests at memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "Each cycle counts number of valid coherent Data Read entries. Such entry is defined as valid when it is allocated until deallocation. Does not include prefetches.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches",
+ "Counter": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk until deallocation. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "Each cycle counts number of valid coherent Data Read entries. Such entry is defined as valid when it is allocated until deallocation. Does not include prefetches.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -64,9 +78,11 @@
"Unit": "ARB"
},
{
- "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "BriefDescription": "Counts number of all coherent Data Read entries. Does not include prefetches.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json
index c6596ba09195..cc8110ac020c 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "UNC_CLOCK.SOCKET",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json
index b28f62ce1f39..3ff51040f84f 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -116,6 +130,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
index 4e5572ee7dfe..b5b1e160eba1 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Allocated L1D data cache lines in M state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.ALLOCATED_IN_M",
"SampleAfterValue": "2000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.ALL_M_REPLACEMENT",
"SampleAfterValue": "2000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.EVICTION",
"SampleAfterValue": "2000003",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1D data line replacements.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xBF",
"EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "L1D miss outstanding duration in cycles.",
+ "Counter": "2",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"SampleAfterValue": "2000003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -62,6 +70,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.ALL",
"SampleAfterValue": "200003",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_E",
"SampleAfterValue": "200003",
@@ -84,6 +95,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_M",
"SampleAfterValue": "200003",
@@ -91,6 +103,7 @@
},
{
"BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.HIT_S",
"SampleAfterValue": "200003",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L2_L1D_WB_RQSTS.MISS",
"SampleAfterValue": "200003",
@@ -105,6 +119,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
@@ -113,6 +128,7 @@
},
{
"BriefDescription": "L2 cache lines in E state filling L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E",
"SampleAfterValue": "100003",
@@ -120,6 +136,7 @@
},
{
"BriefDescription": "L2 cache lines in I state filling L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.I",
"SampleAfterValue": "100003",
@@ -127,6 +144,7 @@
},
{
"BriefDescription": "L2 cache lines in S state filling L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S",
"SampleAfterValue": "100003",
@@ -134,6 +152,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100003",
@@ -141,6 +160,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100003",
@@ -148,6 +168,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines filling the L2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DIRTY_ALL",
"SampleAfterValue": "100003",
@@ -155,6 +176,7 @@
},
{
"BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PF_CLEAN",
"SampleAfterValue": "100003",
@@ -162,6 +184,7 @@
},
{
"BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PF_DIRTY",
"SampleAfterValue": "100003",
@@ -169,6 +192,7 @@
},
{
"BriefDescription": "L2 code requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"SampleAfterValue": "200003",
@@ -176,6 +200,7 @@
},
{
"BriefDescription": "Demand Data Read requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"SampleAfterValue": "200003",
@@ -183,6 +208,7 @@
},
{
"BriefDescription": "Requests from L2 hardware prefetchers.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"SampleAfterValue": "200003",
@@ -190,6 +216,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"SampleAfterValue": "200003",
@@ -197,6 +224,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
@@ -204,6 +232,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
@@ -211,6 +240,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
@@ -218,6 +248,7 @@
},
{
"BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_HIT",
"SampleAfterValue": "200003",
@@ -225,6 +256,7 @@
},
{
"BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_MISS",
"SampleAfterValue": "200003",
@@ -232,6 +264,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
@@ -239,6 +272,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
@@ -246,6 +280,7 @@
},
{
"BriefDescription": "RFOs that access cache lines in any state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.ALL",
"SampleAfterValue": "200003",
@@ -253,6 +288,7 @@
},
{
"BriefDescription": "RFOs that hit cache lines in E state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
"SampleAfterValue": "200003",
@@ -260,6 +296,7 @@
},
{
"BriefDescription": "RFOs that hit cache lines in M state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
"SampleAfterValue": "200003",
@@ -267,6 +304,7 @@
},
{
"BriefDescription": "RFOs that miss cache lines.",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_STORE_LOCK_RQSTS.MISS",
"SampleAfterValue": "200003",
@@ -274,6 +312,7 @@
},
{
"BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_PF",
"SampleAfterValue": "200003",
@@ -281,6 +320,7 @@
},
{
"BriefDescription": "Transactions accessing L2 pipe.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.ALL_REQUESTS",
"SampleAfterValue": "200003",
@@ -288,6 +328,7 @@
},
{
"BriefDescription": "L2 cache accesses when fetching instructions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.CODE_RD",
"SampleAfterValue": "200003",
@@ -295,6 +336,7 @@
},
{
"BriefDescription": "Demand Data Read requests that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.DEMAND_DATA_RD",
"SampleAfterValue": "200003",
@@ -302,6 +344,7 @@
},
{
"BriefDescription": "L1D writebacks that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L1D_WB",
"SampleAfterValue": "200003",
@@ -309,6 +352,7 @@
},
{
"BriefDescription": "L2 fill requests that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_FILL",
"SampleAfterValue": "200003",
@@ -316,6 +360,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"SampleAfterValue": "200003",
@@ -323,6 +368,7 @@
},
{
"BriefDescription": "RFO requests that access L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.RFO",
"SampleAfterValue": "200003",
@@ -330,6 +376,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked.",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"SampleAfterValue": "2000003",
@@ -337,6 +384,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100003",
@@ -344,6 +392,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "100003",
@@ -351,6 +400,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"PEBS": "1",
@@ -360,6 +410,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"PEBS": "1",
@@ -369,6 +420,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
@@ -377,6 +429,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
@@ -385,6 +438,7 @@
},
{
"BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
"PEBS": "1",
@@ -394,6 +448,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -402,6 +457,7 @@
},
{
"BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
@@ -410,6 +466,7 @@
},
{
"BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
@@ -418,6 +475,7 @@
},
{
"BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"PEBS": "1",
@@ -427,6 +485,7 @@
},
{
"BriefDescription": "All retired load uops. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
@@ -436,6 +495,7 @@
},
{
"BriefDescription": "All retired store uops. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
@@ -445,6 +505,7 @@
},
{
"BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
@@ -453,6 +514,7 @@
},
{
"BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
@@ -462,6 +524,7 @@
},
{
"BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
@@ -471,6 +534,7 @@
},
{
"BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
@@ -479,6 +543,7 @@
},
{
"BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
@@ -487,6 +552,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"SampleAfterValue": "100003",
@@ -494,6 +560,7 @@
},
{
"BriefDescription": "Cacheable and noncacheable code read requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"SampleAfterValue": "100003",
@@ -501,6 +568,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -508,6 +576,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"SampleAfterValue": "100003",
@@ -515,6 +584,7 @@
},
{
"BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"SampleAfterValue": "2000003",
@@ -522,6 +592,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
@@ -529,6 +600,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -537,6 +609,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -545,6 +618,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -553,6 +627,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
@@ -560,6 +635,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
@@ -568,6 +644,7 @@
},
{
"BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
@@ -575,6 +652,7 @@
},
{
"BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -584,6 +662,7 @@
},
{
"BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -593,6 +672,7 @@
},
{
"BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -602,6 +682,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -611,6 +692,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -620,6 +702,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -629,6 +712,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -638,6 +722,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -647,6 +732,7 @@
},
{
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -656,6 +742,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -665,6 +752,7 @@
},
{
"BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -674,6 +762,7 @@
},
{
"BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -683,6 +772,7 @@
},
{
"BriefDescription": "Counts prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -692,6 +782,7 @@
},
{
"BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -701,6 +792,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -710,6 +802,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -719,6 +812,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -728,6 +822,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -737,6 +832,7 @@
},
{
"BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -746,6 +842,7 @@
},
{
"BriefDescription": "Counts all prefetch RFOs that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -755,6 +852,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -764,6 +862,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -773,6 +872,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -782,6 +882,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -791,6 +892,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo references (demand & prefetch) .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -800,6 +902,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -809,6 +912,7 @@
},
{
"BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -818,6 +922,7 @@
},
{
"BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -827,6 +932,7 @@
},
{
"BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -836,6 +942,7 @@
},
{
"BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -845,6 +952,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch prefetch RFOs .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -854,6 +962,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -863,6 +972,7 @@
},
{
"BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -872,6 +982,7 @@
},
{
"BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -881,6 +992,7 @@
},
{
"BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -890,6 +1002,7 @@
},
{
"BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -899,6 +1012,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -908,6 +1022,7 @@
},
{
"BriefDescription": "REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -917,6 +1032,7 @@
},
{
"BriefDescription": "Counts all demand code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -926,6 +1042,7 @@
},
{
"BriefDescription": "Counts all demand code reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -935,6 +1052,7 @@
},
{
"BriefDescription": "Counts demand code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -944,6 +1062,7 @@
},
{
"BriefDescription": "Counts demand code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -953,6 +1072,7 @@
},
{
"BriefDescription": "Counts demand code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -962,6 +1082,7 @@
},
{
"BriefDescription": "Counts demand code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -971,6 +1092,7 @@
},
{
"BriefDescription": "Counts all demand data reads .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -980,6 +1102,7 @@
},
{
"BriefDescription": "Counts all demand data reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -989,6 +1112,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -998,6 +1122,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1007,6 +1132,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1016,6 +1142,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1025,6 +1152,7 @@
},
{
"BriefDescription": "Counts all demand rfo's .",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1034,6 +1162,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1043,6 +1172,7 @@
},
{
"BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1052,6 +1182,7 @@
},
{
"BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1061,6 +1192,7 @@
},
{
"BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1070,6 +1202,7 @@
},
{
"BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1079,6 +1212,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_M.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1088,6 +1222,7 @@
},
{
"BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses. It also includes L2 hints sent to LLC to keep a line from being evicted out of the core caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1097,6 +1232,7 @@
},
{
"BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1106,6 +1242,7 @@
},
{
"BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
"MSRIndex": "0x1a6,0x1a7",
@@ -1115,6 +1252,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1124,6 +1262,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1133,6 +1272,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1142,6 +1282,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1151,6 +1292,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1160,6 +1302,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1169,6 +1312,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) data reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1178,6 +1322,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1187,6 +1332,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1196,6 +1342,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1205,6 +1352,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1214,6 +1362,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1223,6 +1372,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1232,6 +1382,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1241,6 +1392,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1250,6 +1402,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1259,6 +1412,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1268,6 +1422,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1277,6 +1432,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1286,6 +1442,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1295,6 +1452,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1304,6 +1462,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1313,6 +1472,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1322,6 +1482,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1331,6 +1492,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1340,6 +1502,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1349,6 +1512,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1358,6 +1522,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1367,6 +1532,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1376,6 +1542,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1385,6 +1552,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1394,6 +1562,7 @@
},
{
"BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1403,6 +1572,7 @@
},
{
"BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1412,6 +1582,7 @@
},
{
"BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1421,6 +1592,7 @@
},
{
"BriefDescription": "Counts non-temporal stores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1430,6 +1602,7 @@
},
{
"BriefDescription": "Split locks in SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/counter.json b/tools/perf/pmu-events/arch/x86/sandybridge/counter.json
new file mode 100644
index 000000000000..35bb154900d7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/counter.json
@@ -0,0 +1,17 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "ARB",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
index 79e8f403c426..8b570829e2e0 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles with any input/output SSE or FP assist.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -51,6 +58,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -58,6 +66,7 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,6 +74,7 @@
},
{
"BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULs and IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000003",
@@ -72,6 +82,7 @@
},
{
"BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_STORE",
"SampleAfterValue": "100003",
@@ -79,6 +90,7 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
@@ -86,6 +98,7 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
@@ -93,6 +106,7 @@
},
{
"BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -100,6 +114,7 @@
},
{
"BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_SINGLE",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
index 700716b42f1a..e95d1005e22f 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
"SampleAfterValue": "2000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "DSB_FILL.ALL_CANCEL",
"SampleAfterValue": "2000003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "DSB_FILL.EXCEED_DSB_LINES",
"SampleAfterValue": "2000003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "DSB_FILL.OTHER_CANCEL",
"SampleAfterValue": "2000003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
@@ -51,6 +58,7 @@
},
{
"BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
@@ -59,6 +67,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -75,6 +85,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -83,6 +94,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -91,6 +103,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -99,6 +112,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
@@ -106,6 +120,7 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
"SampleAfterValue": "2000003",
@@ -113,6 +128,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"SampleAfterValue": "2000003",
@@ -120,6 +136,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -128,6 +145,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"SampleAfterValue": "2000003",
@@ -135,6 +153,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -144,6 +163,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -152,6 +172,7 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -161,6 +182,7 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
@@ -168,6 +190,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"SampleAfterValue": "2000003",
@@ -175,6 +198,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -184,6 +208,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
@@ -191,6 +216,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
@@ -199,6 +225,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -207,6 +234,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -216,6 +244,7 @@
},
{
"BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
@@ -225,6 +254,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -233,6 +263,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -241,6 +272,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
index 0a6fc0136f4a..72b79b606c40 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Loads with latency value being above 128.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Loads with latency value being above 16.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
@@ -29,6 +32,7 @@
},
{
"BriefDescription": "Loads with latency value being above 256.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
@@ -39,6 +43,7 @@
},
{
"BriefDescription": "Loads with latency value being above 32.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
@@ -49,6 +54,7 @@
},
{
"BriefDescription": "Loads with latency value being above 4 .",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
@@ -59,6 +65,7 @@
},
{
"BriefDescription": "Loads with latency value being above 512.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
@@ -69,6 +76,7 @@
},
{
"BriefDescription": "Loads with latency value being above 64.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
@@ -79,6 +87,7 @@
},
{
"BriefDescription": "Loads with latency value being above 8.",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
@@ -89,6 +98,7 @@
},
{
"BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
+ "Counter": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
"PEBS": "2",
@@ -97,6 +107,7 @@
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"SampleAfterValue": "2000003",
@@ -104,6 +115,7 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"SampleAfterValue": "2000003",
@@ -111,6 +123,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -120,6 +133,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -129,6 +143,7 @@
},
{
"BriefDescription": "Counts all prefetch code reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -138,6 +153,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -147,6 +163,7 @@
},
{
"BriefDescription": "Counts all prefetch RFOs that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -156,6 +173,7 @@
},
{
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -165,6 +183,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -174,6 +193,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -184,6 +204,7 @@
},
{
"BriefDescription": "Counts LLC replacements.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -194,6 +215,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS_LOCAL.ANY_LLC_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -203,6 +225,7 @@
},
{
"BriefDescription": "Counts demand code reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -212,6 +235,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -221,6 +245,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -230,6 +255,7 @@
},
{
"BriefDescription": "Counts demand data writes (RFOs) that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -239,6 +265,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -248,6 +275,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -257,6 +285,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -266,6 +295,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -275,6 +305,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -284,6 +315,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -293,6 +325,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -302,6 +335,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the LLC and the data returned from dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -311,6 +345,7 @@
},
{
"BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -320,6 +355,7 @@
},
{
"BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -329,6 +365,7 @@
},
{
"BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "PAGE_WALKS.LLC_MISS",
"SampleAfterValue": "100003",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json b/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json
index a2c27794c0d8..7dc7eb0d3dd3 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json
@@ -5,7 +5,18 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/other.json b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
index 9f96121baef8..42692fa24b6c 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"SampleAfterValue": "2000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"SampleAfterValue": "2000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "HW_PRE_REQ.DL1_MISS",
"SampleAfterValue": "2000003",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Valid instructions written to IQ per cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
"SampleAfterValue": "2000003",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
index ecaf94ccc9c7..f2198bab5586 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "AGU_BYPASS_CANCEL.COUNT",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Divide operations executed.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"SampleAfterValue": "2000003",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Speculative and retired branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"SampleAfterValue": "200003",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Not taken macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"SampleAfterValue": "200003",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Taken speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
@@ -116,12 +132,14 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "400009"
},
{
"BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -138,6 +157,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
@@ -145,6 +165,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -153,6 +174,7 @@
},
{
"BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
"PEBS": "1",
@@ -161,6 +183,7 @@
},
{
"BriefDescription": "Return instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -169,6 +192,7 @@
},
{
"BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -177,6 +201,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
@@ -184,6 +209,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
@@ -191,6 +217,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -198,6 +225,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -205,6 +233,7 @@
},
{
"BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -212,6 +241,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -220,6 +250,7 @@
},
{
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -227,6 +258,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -234,6 +266,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -241,6 +274,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
@@ -248,6 +282,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
@@ -255,6 +290,7 @@
},
{
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"SampleAfterValue": "200003",
@@ -262,12 +298,14 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "400009"
},
{
"BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -276,6 +314,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -284,6 +323,7 @@
},
{
"BriefDescription": "Direct and indirect mispredicted near call instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -292,6 +332,7 @@
},
{
"BriefDescription": "Mispredicted not taken branch instructions retired.(Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NOT_TAKEN",
"PEBS": "1",
@@ -300,6 +341,7 @@
},
{
"BriefDescription": "Mispredicted taken branch instructions retired. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.TAKEN",
"PEBS": "1",
@@ -308,6 +350,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -315,6 +358,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
@@ -323,6 +367,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
@@ -330,6 +375,7 @@
},
{
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -337,6 +383,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -344,6 +391,7 @@
},
{
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
@@ -352,6 +400,7 @@
{
"AnyThread": "1",
"BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
@@ -359,6 +408,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -367,12 +417,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003"
@@ -380,12 +432,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "Counter": "2",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
@@ -394,6 +448,7 @@
},
{
"BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
@@ -402,6 +457,7 @@
},
{
"BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
@@ -410,6 +466,7 @@
},
{
"BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "Counter": "2",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
@@ -418,6 +475,7 @@
},
{
"BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
@@ -426,6 +484,7 @@
},
{
"BriefDescription": "Stall cycles because IQ is full.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000003",
@@ -433,6 +492,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000003",
@@ -440,6 +500,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
"SampleAfterValue": "2000003",
@@ -447,12 +508,14 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "Counter": "1",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "2",
@@ -461,6 +524,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RAT_STALL_CYCLES",
"SampleAfterValue": "2000003",
@@ -468,6 +532,7 @@
},
{
"BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
@@ -477,6 +542,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
@@ -485,6 +551,7 @@
},
{
"BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -494,6 +561,7 @@
},
{
"BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL_BLOCK",
"SampleAfterValue": "100003",
@@ -501,6 +569,7 @@
},
{
"BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"SampleAfterValue": "100003",
@@ -508,6 +577,7 @@
},
{
"BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"SampleAfterValue": "100003",
@@ -515,6 +585,7 @@
},
{
"BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
@@ -523,6 +594,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
@@ -531,6 +603,7 @@
},
{
"BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
"SampleAfterValue": "100003",
@@ -538,6 +611,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.HW_PF",
"SampleAfterValue": "100003",
@@ -545,6 +619,7 @@
},
{
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
"SampleAfterValue": "100003",
@@ -552,6 +627,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -560,6 +636,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -568,6 +645,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
@@ -575,6 +653,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -584,6 +663,7 @@
},
{
"BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MASKMOV",
"PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
@@ -592,6 +672,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -600,6 +681,7 @@
},
{
"BriefDescription": "Retired instructions experiencing ITLB misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
"SampleAfterValue": "100003",
@@ -607,6 +689,7 @@
},
{
"BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
"SampleAfterValue": "2000003",
@@ -614,6 +697,7 @@
},
{
"BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
@@ -623,6 +707,7 @@
},
{
"BriefDescription": "Multiply packed/scalar single precision uops allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
"SampleAfterValue": "2000003",
@@ -630,6 +715,7 @@
},
{
"BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
"PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
@@ -638,6 +724,7 @@
},
{
"BriefDescription": "Resource-related stall cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000003",
@@ -645,6 +732,7 @@
},
{
"BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LB",
"SampleAfterValue": "2000003",
@@ -652,6 +740,7 @@
},
{
"BriefDescription": "Resource stalls due to load or store buffers all being in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LB_SB",
"SampleAfterValue": "2000003",
@@ -659,6 +748,7 @@
},
{
"BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.MEM_RS",
"SampleAfterValue": "2000003",
@@ -666,6 +756,7 @@
},
{
"BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.OOO_RSRC",
"SampleAfterValue": "2000003",
@@ -673,6 +764,7 @@
},
{
"BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
@@ -680,6 +772,7 @@
},
{
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
@@ -687,6 +780,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"SampleAfterValue": "2000003",
@@ -694,6 +788,7 @@
},
{
"BriefDescription": "Cycles with either free list is empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
"SampleAfterValue": "2000003",
@@ -701,6 +796,7 @@
},
{
"BriefDescription": "Resource stalls2 control structures full for physical registers.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
"SampleAfterValue": "2000003",
@@ -708,6 +804,7 @@
},
{
"BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "RESOURCE_STALLS2.BOB_FULL",
"SampleAfterValue": "2000003",
@@ -715,6 +812,7 @@
},
{
"BriefDescription": "Resource stalls out of order resources full.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "RESOURCE_STALLS2.OOO_RSRC",
"SampleAfterValue": "2000003",
@@ -722,6 +820,7 @@
},
{
"BriefDescription": "Count cases of saving new LBR.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
@@ -729,6 +828,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"SampleAfterValue": "2000003",
@@ -736,6 +836,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -746,6 +847,7 @@
},
{
"BriefDescription": "Uops dispatched from any thread.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_DISPATCHED.CORE",
"SampleAfterValue": "2000003",
@@ -753,6 +855,7 @@
},
{
"BriefDescription": "Uops dispatched per thread.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_DISPATCHED.THREAD",
"SampleAfterValue": "2000003",
@@ -760,6 +863,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"SampleAfterValue": "2000003",
@@ -768,6 +872,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 0.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
@@ -775,6 +880,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
@@ -783,6 +889,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 1.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
@@ -790,6 +897,7 @@
},
{
"BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
@@ -798,6 +906,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
@@ -805,6 +914,7 @@
},
{
"BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
@@ -813,6 +923,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
@@ -820,6 +931,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
@@ -828,6 +940,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 4.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
@@ -835,6 +948,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
@@ -843,6 +957,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
@@ -850,6 +965,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -858,6 +974,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -866,6 +983,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -874,6 +992,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -882,6 +1001,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"Invert": "1",
@@ -890,6 +1010,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
@@ -899,6 +1020,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -908,6 +1030,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -917,6 +1040,7 @@
},
{
"BriefDescription": "Actually retired uops. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -926,6 +1050,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
@@ -935,6 +1060,7 @@
},
{
"BriefDescription": "Retirement slots used. (Precise Event - PEBS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -944,6 +1070,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -953,6 +1080,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "10",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
index ce836ebda542..ff2e515c744a 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -73,7 +73,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -94,7 +94,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -124,7 +124,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
@@ -152,7 +152,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
@@ -226,7 +226,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -296,13 +296,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -399,7 +399,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
@@ -438,7 +438,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -448,7 +448,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_system_dram_bw_use",
@@ -457,7 +457,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: ",
@@ -505,7 +505,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json
index be9a3ed1a940..8379dae91be4 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_ES",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_I",
"PerPkg": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_M",
"PerPkg": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "L3 Lookup external snoop request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_MESI",
"PerPkg": "1",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
"PerPkg": "1",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
"PerPkg": "1",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
"PerPkg": "1",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
"PerPkg": "1",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
"PerPkg": "1",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in I-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_I",
"PerPkg": "1",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
"PerPkg": "1",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
"PerPkg": "1",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EVICTION",
"PerPkg": "1",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "An external snoop hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EXTERNAL",
"PerPkg": "1",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
"PerPkg": "1",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EVICTION",
"PerPkg": "1",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "An external snoop hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EXTERNAL",
"PerPkg": "1",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
"PerPkg": "1",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
"PerPkg": "1",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "An external snoop misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EXTERNAL",
"PerPkg": "1",
@@ -193,6 +217,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json
index c3252c094a9c..ba340e858ed4 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles weighted by number of requests pending in Coherency Tracker.",
+ "Counter": "0",
"EventCode": "0x83",
"EventName": "UNC_ARB_COH_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of requests allocated in Coherency Tracker.",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts cycles weighted by the number of requests waiting for data returning from the memory controller. Accounts for coherent and non-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with at least half of the requests outstanding are waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "Counter": "0,1",
"CounterMask": "10",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_OVER_HALF_FULL",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "Counter": "0,1",
"CounterMask": "1",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts the number of LLC evictions allocated.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.EVICTIONS",
"PerPkg": "1",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Counts the number of allocated write entries, include full, partial, and LLC evictions.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
"PerPkg": "1",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Counter": "Fixed",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
index fa08d355b97e..e0f6eb95455d 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -45,6 +51,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -52,6 +59,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
@@ -59,6 +67,7 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
@@ -66,6 +75,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"SampleAfterValue": "100007",
@@ -73,6 +83,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -80,6 +91,7 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -87,6 +99,7 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -94,6 +107,7 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
"PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"SampleAfterValue": "100007",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "STLB flush attempts.",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"SampleAfterValue": "100007",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
index b0447aad0dfc..eec7bf6ebd53 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D.HWPF_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.HWPF_MISS",
"SampleAfterValue": "1000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x48",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event L1D_PEND_MISS.L2_STALLS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALL",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALLS",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -74,14 +83,17 @@
"UMask": "0x1f"
},
{
- "BriefDescription": "L2_LINES_OUT.NON_SILENT",
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.SILENT",
"PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
@@ -90,6 +102,7 @@
},
{
"BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_LINES_OUT.USELESS_HWPF",
"PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
@@ -106,6 +120,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_RQSTS.MISS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
@@ -114,6 +129,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -122,6 +138,7 @@
},
{
"BriefDescription": "Demand Data Read access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -130,6 +147,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Counts demand requests that miss L2 cache.",
@@ -138,6 +156,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PublicDescription": "Counts demand requests to L2 cache.",
@@ -146,6 +165,7 @@
},
{
"BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_HWPF",
"SampleAfterValue": "200003",
@@ -153,6 +173,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -161,6 +182,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -169,6 +191,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -177,6 +200,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
@@ -185,6 +209,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -193,6 +218,7 @@
},
{
"BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.HWPF_MISS",
"SampleAfterValue": "200003",
@@ -200,6 +226,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_REQUEST.MISS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
@@ -208,6 +235,7 @@
},
{
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
@@ -216,6 +244,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -224,6 +253,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -232,6 +262,7 @@
},
{
"BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_HIT",
"PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -240,6 +271,7 @@
},
{
"BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_MISS",
"PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -247,7 +279,17 @@
"UMask": "0x28"
},
{
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -256,6 +298,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -264,6 +307,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -274,6 +318,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -284,6 +329,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -294,6 +340,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -304,6 +351,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -314,6 +362,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -324,6 +373,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -334,6 +384,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -344,6 +395,7 @@
},
{
"BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
"PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
@@ -352,6 +404,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
@@ -362,6 +415,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -372,6 +426,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -382,6 +437,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
@@ -392,6 +448,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
@@ -402,6 +459,7 @@
},
{
"BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
@@ -411,6 +469,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
@@ -421,6 +480,7 @@
},
{
"BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
@@ -430,6 +490,7 @@
},
{
"BriefDescription": "Retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
"PEBS": "1",
@@ -439,6 +500,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -449,6 +511,7 @@
},
{
"BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -459,6 +522,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -469,6 +533,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -479,6 +544,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -489,6 +555,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -499,6 +566,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -509,6 +577,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -519,6 +588,7 @@
},
{
"BriefDescription": "Retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
@@ -529,6 +599,7 @@
},
{
"BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "MEM_STORE_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
@@ -536,6 +607,7 @@
},
{
"BriefDescription": "Retired memory uops for any access",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe5",
"EventName": "MEM_UOP_RETIRED.ANY",
"PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
@@ -544,6 +616,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -553,6 +626,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -562,6 +636,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -571,6 +646,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -580,6 +656,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -589,6 +666,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -598,6 +676,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -607,6 +686,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -616,6 +696,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -625,6 +706,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -634,6 +716,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -643,6 +726,7 @@
},
{
"BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -652,6 +736,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -661,6 +746,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -670,6 +756,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -679,6 +766,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -688,6 +776,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -697,6 +786,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -706,6 +796,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -715,6 +806,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -724,6 +816,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -733,6 +826,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -742,6 +836,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -751,6 +846,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -760,6 +856,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -769,6 +866,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -778,6 +876,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.RFO_TO_CORE.L3_HIT_M",
"MSRIndex": "0x1a6,0x1a7",
@@ -787,6 +886,7 @@
},
{
"BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -796,6 +896,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"SampleAfterValue": "100003",
@@ -803,6 +904,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -810,7 +912,17 @@
"UMask": "0x8"
},
{
+ "BriefDescription": "Cacheable and noncacheable code read requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -818,7 +930,17 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
@@ -827,6 +949,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -834,7 +957,18 @@
"UMask": "0x8"
},
{
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -843,6 +977,7 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -851,13 +986,24 @@
},
{
"BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
"SampleAfterValue": "1000003",
"UMask": "0x8"
},
{
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
@@ -866,6 +1012,7 @@
},
{
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "SQ_MISC.BUS_LOCK",
"PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
@@ -873,7 +1020,16 @@
"UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
@@ -882,6 +1038,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
@@ -890,6 +1047,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.T0",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
@@ -898,6 +1056,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/counter.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/counter.json
new file mode 100644
index 000000000000..088d5954747c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/counter.json
@@ -0,0 +1,82 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "M2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2M",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M3UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CXLCM",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "CXLDP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "MCHBM",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2HBM",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "MDF",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
index 1bdefaf96287..bc475e163227 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ARITH.FPDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.FPDIV_ACTIVE",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.FP",
"PublicDescription": "Counts all microcode Floating Point assists.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.SSE_AVX_MIX",
"SampleAfterValue": "1000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"SampleAfterValue": "2000003",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"SampleAfterValue": "2000003",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"SampleAfterValue": "2000003",
@@ -45,6 +51,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V0",
"SampleAfterValue": "2000003",
@@ -52,6 +59,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V1",
"SampleAfterValue": "2000003",
@@ -59,6 +67,7 @@
},
{
"BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.V2",
"SampleAfterValue": "2000003",
@@ -66,6 +75,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -82,6 +93,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -90,6 +102,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -106,6 +120,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -114,6 +129,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -122,6 +138,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -130,6 +147,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -138,6 +156,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -146,6 +165,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -154,6 +174,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -162,6 +183,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
"SampleAfterValue": "100003",
@@ -169,6 +191,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
"SampleAfterValue": "100003",
@@ -176,6 +199,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
"SampleAfterValue": "100003",
@@ -183,6 +207,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
"SampleAfterValue": "100003",
@@ -190,6 +215,7 @@
},
{
"BriefDescription": "Number of all Scalar Half-Precision FP arithmetic instructions(1) retired - regular and complex.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.SCALAR",
"PublicDescription": "FP_ARITH_INST_RETIRED2.SCALAR",
@@ -198,6 +224,7 @@
},
{
"BriefDescription": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
"SampleAfterValue": "100003",
@@ -205,6 +232,7 @@
},
{
"BriefDescription": "Number of all Vector (also called packed) Half-Precision FP arithmetic instructions(1) retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcf",
"EventName": "FP_ARITH_INST_RETIRED2.VECTOR",
"PublicDescription": "FP_ARITH_INST_RETIRED2.VECTOR",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
index 93d99318a623..f6e3e40a3b20 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Clears due to Unknown Branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.MS_BUSY",
"SampleAfterValue": "500009",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -54,6 +60,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -65,6 +72,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -76,6 +84,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -87,6 +96,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -98,6 +108,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -109,6 +120,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -120,6 +132,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -131,6 +144,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -142,6 +156,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -153,6 +168,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -164,6 +180,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -175,6 +192,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -186,6 +204,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -197,6 +216,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -208,6 +228,7 @@
},
{
"BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
@@ -218,6 +239,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -229,6 +251,7 @@
},
{
"BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
@@ -239,6 +262,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_DATA.STALLS",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
@@ -246,7 +270,18 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "ICACHE_DATA.STALL_PERIODS",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALL_PERIODS",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
@@ -255,6 +290,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -264,6 +300,7 @@
},
{
"BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
@@ -273,6 +310,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
@@ -281,6 +319,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_ANY",
@@ -290,6 +329,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_OK",
@@ -299,6 +339,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -307,6 +348,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES_ANY",
@@ -316,6 +358,7 @@
},
{
"BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -326,6 +369,7 @@
},
{
"BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS).",
@@ -334,6 +378,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_UOPS_NOT_DELIVERED.CORE]",
@@ -342,6 +387,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE",
@@ -351,6 +397,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
@@ -361,6 +408,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle. [This event is alias to IDQ_BUBBLES.CORE]",
@@ -369,6 +417,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -378,6 +427,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled [This event is alias to IDQ_BUBBLES.CYCLES_FE_WAS_OK]",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
index 5420f529f491..2ea19539291b 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "9",
"EventCode": "0x47",
"EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
@@ -50,7 +56,21 @@
"UMask": "0x9"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "53",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -63,6 +83,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -75,6 +96,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -87,6 +109,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -99,6 +122,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -111,6 +135,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -123,6 +148,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -135,6 +161,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -147,6 +174,7 @@
},
{
"BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Counter": "0",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
@@ -157,6 +185,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -166,6 +195,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -175,6 +205,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -184,6 +215,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -193,6 +225,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -202,6 +235,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -211,6 +245,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -220,6 +255,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
"MSRIndex": "0x1a6,0x1a7",
@@ -229,6 +265,7 @@
},
{
"BriefDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -238,6 +275,7 @@
},
{
"BriefDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -247,6 +285,7 @@
},
{
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
@@ -254,6 +293,7 @@
},
{
"BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
"PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
@@ -262,6 +302,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "1",
@@ -271,6 +312,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -279,6 +321,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -287,6 +330,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
@@ -295,6 +339,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -303,6 +348,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Counts the number of times RTM commit succeeded.",
@@ -311,6 +357,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
@@ -319,6 +366,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_READ",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
@@ -327,6 +375,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
@@ -335,6 +384,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json
index 81e5ca1c3078..e1de6c2675c4 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/metricgroups.json
@@ -5,8 +5,21 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"C0Wait": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
index 442ef3807a9d..05d8f14956ee 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ASSISTS.PAGE_FAULT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.PAGE_FAULT",
"SampleAfterValue": "1000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb7",
"EventName": "EXE.AMX_BUSY",
"SampleAfterValue": "2000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -69,6 +77,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -78,6 +87,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_SOCKET_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by PMM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -96,6 +107,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -105,6 +117,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -114,6 +127,7 @@
},
{
"BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -123,6 +137,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -132,6 +147,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -141,6 +157,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -150,6 +167,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -159,6 +177,7 @@
},
{
"BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -168,6 +187,7 @@
},
{
"BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L2.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -186,6 +207,7 @@
},
{
"BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.HWPF_L3.REMOTE",
"MSRIndex": "0x1a6,0x1a7",
@@ -195,6 +217,7 @@
},
{
"BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -204,6 +227,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -213,6 +237,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -222,6 +247,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -231,6 +257,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -240,6 +267,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -249,6 +277,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE",
"MSRIndex": "0x1a6,0x1a7",
@@ -258,6 +287,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -267,6 +297,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
"MSRIndex": "0x1a6,0x1a7",
@@ -276,6 +307,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
"MSRIndex": "0x1a6,0x1a7",
@@ -285,6 +317,7 @@
},
{
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.READS_TO_CORE.SNC_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -294,6 +327,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -303,6 +337,7 @@
},
{
"BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.WRITE_ESTIMATE.MEMORY",
"MSRIndex": "0x1a6,0x1a7",
@@ -312,6 +347,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa5",
"EventName": "RS.EMPTY",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
@@ -320,6 +356,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xa5",
@@ -330,7 +367,16 @@
"UMask": "0x7"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EdgeDetect": "1",
@@ -342,6 +388,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xa5",
"EventName": "RS_EMPTY.CYCLES",
@@ -350,6 +397,7 @@
},
{
"BriefDescription": "Cycles the uncore cannot take further requests",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x2d",
"EventName": "XQ.FULL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
index e2086bedeca8..5d5811f26151 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.DIV_ACTIVE",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.FPDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb0",
"EventName": "ARITH.IDIV_ACTIVE",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event ARITH.IDIV_ACTIVE",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb0",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.ANY",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -61,6 +68,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -70,6 +78,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -79,6 +88,7 @@
},
{
"BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -88,6 +98,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -97,6 +108,7 @@
},
{
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -115,6 +128,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -141,6 +157,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -150,6 +167,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -159,6 +177,7 @@
},
{
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -168,6 +187,7 @@
},
{
"BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "Mispredicted indirect CALL retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -186,6 +207,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -195,6 +217,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -204,6 +227,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C01",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
@@ -212,6 +236,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C02",
"PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
@@ -220,6 +245,7 @@
},
{
"BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.C0_WAIT",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
@@ -228,6 +254,7 @@
},
{
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -236,6 +263,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
@@ -244,6 +272,7 @@
},
{
"BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.PAUSE",
"SampleAfterValue": "2000003",
@@ -251,6 +280,7 @@
},
{
"BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xec",
@@ -260,6 +290,7 @@
},
{
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -268,6 +299,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -275,6 +307,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
@@ -283,6 +316,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -290,6 +324,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -297,6 +332,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -305,6 +341,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -313,6 +350,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "16",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -321,6 +359,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -329,6 +368,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -337,6 +377,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -345,6 +386,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -352,7 +394,16 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "Cycles total of 2 or 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_3_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xc"
+ },
+ {
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -361,6 +412,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -369,6 +421,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -377,6 +430,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
@@ -385,6 +439,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
@@ -394,6 +449,7 @@
},
{
"BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
@@ -402,6 +458,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -410,6 +467,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -418,6 +476,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -426,6 +485,7 @@
},
{
"BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -434,6 +494,7 @@
},
{
"BriefDescription": "Retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBS": "1",
@@ -443,6 +504,7 @@
},
{
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
@@ -451,6 +513,7 @@
},
{
"BriefDescription": "Iterations of Repeat string retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.REP_ITERATION",
"PEBS": "1",
@@ -460,6 +523,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xad",
@@ -470,6 +534,7 @@
},
{
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
@@ -478,6 +543,7 @@
},
{
"BriefDescription": "INT_MISC.MBA_STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.MBA_STALLS",
"SampleAfterValue": "1000003",
@@ -485,6 +551,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -493,6 +560,7 @@
},
{
"BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"MSRIndex": "0x3F7",
@@ -502,6 +570,7 @@
},
{
"BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
"EventName": "INT_MISC.UOP_DROPPING",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
@@ -510,6 +579,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.128BIT",
"SampleAfterValue": "1000003",
@@ -517,6 +587,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.256BIT",
"SampleAfterValue": "1000003",
@@ -524,6 +595,7 @@
},
{
"BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.ADD_128",
"PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
@@ -532,6 +604,7 @@
},
{
"BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.ADD_256",
"PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
@@ -540,6 +613,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.MUL_256",
"SampleAfterValue": "1000003",
@@ -547,6 +621,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.SHUFFLES",
"SampleAfterValue": "1000003",
@@ -554,6 +629,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.VNNI_128",
"SampleAfterValue": "1000003",
@@ -561,6 +637,7 @@
},
{
"BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "INT_VEC_RETIRED.VNNI_256",
"SampleAfterValue": "1000003",
@@ -568,6 +645,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
@@ -576,6 +654,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -584,6 +663,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -592,6 +672,7 @@
},
{
"BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -600,6 +681,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -609,6 +691,7 @@
},
{
"BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "6",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
@@ -618,6 +701,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -626,6 +710,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -636,6 +721,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -644,6 +730,7 @@
},
{
"BriefDescription": "LFENCE instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe0",
"EventName": "MISC2_RETIRED.LFENCE",
"PublicDescription": "number of LFENCE retired instructions",
@@ -652,6 +739,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
@@ -660,6 +748,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -668,6 +757,7 @@
},
{
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"SampleAfterValue": "100003",
@@ -675,6 +765,7 @@
},
{
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "Number of slots in TMA method where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
@@ -683,6 +774,7 @@
},
{
"BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "Counter": "0",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BAD_SPEC_SLOTS",
"PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
@@ -691,6 +783,7 @@
},
{
"BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "Counter": "0",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
"PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
@@ -699,6 +792,7 @@
},
{
"BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
"SampleAfterValue": "10000003",
@@ -706,6 +800,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -713,6 +808,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -721,6 +817,7 @@
},
{
"BriefDescription": "UOPS_DECODED.DEC0_UOPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UOPS_DECODED.DEC0_UOPS",
"SampleAfterValue": "1000003",
@@ -728,6 +825,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_0",
"PublicDescription": "Number of uops dispatch to execution port 0.",
@@ -736,6 +834,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_1",
"PublicDescription": "Number of uops dispatch to execution port 1.",
@@ -744,6 +843,7 @@
},
{
"BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_2_3_10",
"PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
@@ -752,6 +852,7 @@
},
{
"BriefDescription": "Uops executed on ports 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
@@ -760,6 +861,7 @@
},
{
"BriefDescription": "Uops executed on ports 5 and 11",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_5_11",
"PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
@@ -768,6 +870,7 @@
},
{
"BriefDescription": "Uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_6",
"PublicDescription": "Number of uops dispatch to execution port 6.",
@@ -776,6 +879,7 @@
},
{
"BriefDescription": "Uops executed on ports 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
@@ -784,6 +888,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Counts the number of uops executed from any thread.",
@@ -792,6 +897,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -801,6 +907,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -810,6 +917,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -819,6 +927,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -828,6 +937,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
@@ -837,6 +947,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
@@ -846,6 +957,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
@@ -855,6 +967,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
@@ -864,6 +977,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.STALLS",
@@ -874,6 +988,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UOPS_EXECUTED.STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xb1",
@@ -884,6 +999,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
@@ -891,6 +1007,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -899,6 +1016,7 @@
},
{
"BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xae",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -906,7 +1024,17 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "UOPS_ISSUED.CYCLES",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles with retired uop(s).",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.CYCLES",
@@ -916,6 +1044,7 @@
},
{
"BriefDescription": "Retired uops except the last uop of each instruction.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.HEAVY",
"PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
@@ -924,6 +1053,7 @@
},
{
"BriefDescription": "UOPS_RETIRED.MS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"MSRIndex": "0x3F7",
@@ -933,6 +1063,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "Counts the retirement slots used each cycle.",
@@ -941,6 +1072,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.STALLS",
@@ -951,6 +1083,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UOPS_RETIRED.STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"Deprecated": "1",
"EventCode": "0xc2",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
index f8c0eac8b828..2b3b013ccb06 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -47,7 +47,7 @@
},
{
"BriefDescription": "Percentage of time spent in the active CPU power state C0",
- "MetricExpr": "tma_info_system_cpu_utilization",
+ "MetricExpr": "tma_info_system_cpus_utilized",
"MetricName": "cpu_utilization",
"ScaleUnit": "100%"
},
@@ -73,18 +73,54 @@
"ScaleUnit": "1per_instr"
},
{
+ "BriefDescription": "Bandwidth observed by the integrated I/O traffic controller (IIO) of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS * 4 / 1e6 / duration_time",
+ "MetricName": "iio_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth observed by the integrated I/O traffic controller (IIO) of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS * 4 / 1e6 / duration_time",
+ "MetricName": "iio_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_read",
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_local",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from a remote CPU socket.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_remote",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_write",
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_local",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to a remote CPU socket.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_remote",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Percentage of inbound full cacheline writes initiated by end device controllers that miss the L3 cache.",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM",
"MetricName": "io_percent_of_inbound_full_writes_that_miss_l3",
@@ -334,7 +370,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the Advanced Matrix eXtensions (AMX) execution engine was busy with tile (arithmetic) operations",
"MetricExpr": "EXE.AMX_BUSY / tma_info_core_core_clks",
- "MetricGroup": "Compute;HPC;Server;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;Compute;HPC;Server;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_amx_busy",
"MetricThreshold": "tma_amx_busy > 0.5 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"ScaleUnit": "100%"
@@ -342,7 +378,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "78 * ASSISTS.ANY / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
@@ -360,7 +396,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -382,7 +418,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"DefaultMetricgroupName": "TopdownL2",
"MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
- "MetricGroup": "BadSpec;BrMispredicts;Default;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;Default;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2;Default",
@@ -434,8 +470,8 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricExpr": "(76 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 75.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricExpr": "(76.6 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 74.6 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -454,8 +490,8 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricExpr": "75.5 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricExpr": "74.6 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -473,7 +509,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.DIV_ACTIVE / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -503,13 +539,13 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -518,7 +554,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -526,8 +562,8 @@
},
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
- "MetricExpr": "80 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricExpr": "81 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -536,7 +572,7 @@
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -550,7 +586,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -602,7 +638,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.VECTOR + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -640,7 +676,7 @@
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_thread_slots",
- "MetricGroup": "Default;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -650,7 +686,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.MACRO_FUSED / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
@@ -670,7 +706,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -726,36 +762,26 @@
},
{
"BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
- "MetricExpr": "(100 * (1 - max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)) / (((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + cpu@RS.EMPTY\\,umask\\=0x1@) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / CPU_CLK_UNHALTED.THREAD) if max(0, topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)) < (((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + cpu@RS.EMPTY\\,umask\\=0x1@) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / CPU_CLK_UNHALTED.THREAD) else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0) + 0 * slots",
- "MetricGroup": "Cor;SMT",
- "MetricName": "tma_info_botlnk_core_bound_likely"
- },
- {
- "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "100 * (100 * ((topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + INT_MISC.UNKNOWN_BRANCH_CYCLES / CPU_CLK_UNHALTED.THREAD) + min(3 * cpu@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) + max(0, topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots - (topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots)) * ((IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2) / ((IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2 + (IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD) / 2)))",
- "MetricGroup": "DSBmiss;Fed",
- "MetricName": "tma_info_botlnk_dsb_misses"
- },
- {
- "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.",
- "MetricExpr": "100 * (100 * ((topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / slots) * (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD) / (ICACHE_DATA.STALLS / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + INT_MISC.UNKNOWN_BRANCH_CYCLES / CPU_CLK_UNHALTED.THREAD) + min(3 * cpu@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD)))",
- "MetricGroup": "Fed;FetchLat;IcMiss",
- "MetricName": "tma_info_botlnk_ic_misses"
- },
- {
- "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
"MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
"MetricGroup": "Cor;SMT",
"MetricName": "tma_info_botlnk_l0_core_bound_likely",
"MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
@@ -766,38 +792,32 @@
"PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
- "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5"
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
@@ -805,30 +825,30 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * tma_amx_busy / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu@RS.EMPTY\\,umask\\=1@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
@@ -836,7 +856,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
@@ -844,18 +864,25 @@
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -907,7 +934,7 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.8_FLOPS) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / tma_info_core_core_clks",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc"
},
@@ -930,7 +957,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 6 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
@@ -997,7 +1024,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR + (cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ + FP_ARITH_INST_RETIRED2.VECTOR))",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR + (FP_ARITH_INST_RETIRED.VECTOR + FP_ARITH_INST_RETIRED2.VECTOR))",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -1067,7 +1094,7 @@
},
{
"BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.8_FLOPS) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_ipflop",
"MetricThreshold": "tma_info_inst_mix_ipflop < 10"
@@ -1100,24 +1127,12 @@
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 13",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
- },
- {
- "BriefDescription": "\"Bus lock\" per kilo instruction",
- "MetricExpr": "tma_info_memory_mix_bus_lock_pki",
- "MetricGroup": "Mem",
- "MetricName": "tma_info_memory_bus_lock_pki"
- },
- {
- "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki",
- "MetricGroup": "Fed;MemoryTLB",
- "MetricName": "tma_info_memory_code_stlb_mpki"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -1156,30 +1171,18 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "Average Parallel L2 cache miss data reads",
- "MetricExpr": "tma_info_memory_latency_data_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_data_l2_mlp"
- },
- {
"BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_fb_hpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -1192,30 +1195,12 @@
"MetricName": "tma_info_memory_l1mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l2_cache_fill_bw_2t"
- },
- {
- "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
- "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
- "MetricGroup": "L2Evicts;Mem;Server",
- "MetricName": "tma_info_memory_l2_evictions_nonsilent_pki"
- },
- {
- "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
- "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
- "MetricGroup": "L2Evicts;Mem;Server",
- "MetricName": "tma_info_memory_l2_evictions_silent_pki"
- },
- {
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -1246,30 +1231,24 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "tma_info_memory_l3_cache_access_bw"
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * L2_RQSTS.RFO_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
},
{
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / (duration_time * 1e3 / 1e3)",
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "tma_info_memory_l3_cache_access_bw_2t"
+ "MetricName": "tma_info_memory_l3_cache_access_bw"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l3_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -1283,7 +1262,7 @@
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_miss_latency",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
@@ -1295,27 +1274,9 @@
},
{
"BriefDescription": "Average Latency for L3 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l3_miss_latency",
- "MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_latency_load_l3_miss_latency"
- },
- {
- "BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_load_l2_miss_latency"
- },
- {
- "BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_load_l2_mlp"
- },
- {
- "BriefDescription": "Average Latency for L3 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_load_l3_miss_latency"
+ "MetricName": "tma_info_memory_latency_load_l3_miss_latency"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
@@ -1324,12 +1285,6 @@
"MetricName": "tma_info_memory_load_miss_real_latency"
},
{
- "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_load_stlb_mpki",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_load_stlb_mpki"
- },
- {
"BriefDescription": "\"Bus lock\" per kilo instruction",
"MetricExpr": "1e3 * SQ_MISC.BUS_LOCK / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -1355,7 +1310,7 @@
},
{
"BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "tma_info_memory_uc_load_pki",
+ "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_mix_uc_load_pki"
},
@@ -1367,51 +1322,6 @@
"PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "Off-core accesses per kilo instruction for modified write requests",
- "MetricExpr": "1e3 * OCR.MODIFIED_WRITE.ANY_RESPONSE / INST_RETIRED.ANY",
- "MetricGroup": "Offcore",
- "MetricName": "tma_info_memory_offcore_mwrite_any_pki"
- },
- {
- "BriefDescription": "Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
- "MetricExpr": "1e3 * OCR.READS_TO_CORE.ANY_RESPONSE / INST_RETIRED.ANY",
- "MetricGroup": "CacheHits;Offcore",
- "MetricName": "tma_info_memory_offcore_read_any_pki"
- },
- {
- "BriefDescription": "L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
- "MetricExpr": "1e3 * OCR.READS_TO_CORE.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Offcore",
- "MetricName": "tma_info_memory_offcore_read_l3m_pki"
- },
- {
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (4 * (CPU_CLK_UNHALTED.DISTRIBUTED if #SMT_on else CPU_CLK_UNHALTED.THREAD))",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_page_walks_utilization"
- },
- {
- "BriefDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket",
- "MetricExpr": "64 * OCR.READS_TO_CORE.DRAM / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
- "MetricName": "tma_info_memory_r2c_dram_bw",
- "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW."
- },
- {
- "BriefDescription": "Average L3-cache miss BW for Reads-to-Core (R2C)",
- "MetricExpr": "64 * OCR.READS_TO_CORE.L3_MISS / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
- "MetricName": "tma_info_memory_r2c_l3m_bw",
- "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW."
- },
- {
- "BriefDescription": "Average Off-core access BW for Reads-to-Core (R2C)",
- "MetricExpr": "64 * OCR.READS_TO_CORE.ANY_RESPONSE / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
- "MetricName": "tma_info_memory_r2c_offcore_bw",
- "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches."
- },
- {
"BriefDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket",
"MetricExpr": "64 * OCR.READS_TO_CORE.DRAM / 1e9 / duration_time",
"MetricGroup": "HPC;Mem;MemoryBW;SoC",
@@ -1433,12 +1343,6 @@
"PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches."
},
{
- "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_store_stlb_mpki"
- },
- {
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
"MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
"MetricGroup": "Fed;MemoryTLB",
@@ -1464,18 +1368,24 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
- "MetricGroup": "Mem",
- "MetricName": "tma_info_memory_uc_load_pki"
- },
- {
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
@@ -1511,13 +1421,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1530,7 +1440,7 @@
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.8_FLOPS + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.8_FLOPS) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / 1e9 / duration_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_system_gflops",
"PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
@@ -1685,7 +1595,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1721,7 +1631,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -1737,9 +1647,18 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -1756,8 +1675,8 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "33 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricExpr": "32.6 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -1769,7 +1688,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
@@ -1810,11 +1729,11 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
- "MetricExpr": "71 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "72 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
"MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
@@ -1823,14 +1742,14 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"DefaultMetricgroupName": "TopdownL2",
"MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
- "MetricGroup": "BadSpec;Default;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;Default;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2;Default",
@@ -1848,7 +1767,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1857,7 +1776,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -1903,7 +1822,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -1939,7 +1858,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
"MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - INST_RETIRED.MACRO_FUSED) / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
@@ -1948,7 +1867,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -1966,7 +1885,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1974,7 +1893,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -2035,7 +1954,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + cpu@RS.EMPTY\\,umask\\=1@) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_thread_clks",
+ "MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + max(cpu@RS.EMPTY\\,umask\\=1@ - RESOURCE_STALLS.SCOREBOARD, 0)) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -2065,7 +1984,7 @@
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
@@ -2073,7 +1992,7 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
- "MetricExpr": "(135.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 135.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(133 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 133 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
"MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
@@ -2082,7 +2001,7 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
- "MetricExpr": "149 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "153 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
"MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
@@ -2093,7 +2012,7 @@
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -2103,7 +2022,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks + tma_c02_wait",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -2149,7 +2068,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(XQ.FULL_CYCLES + L1D_PEND_MISS.L2_STALLS) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -2176,7 +2095,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricExpr": "(MEM_STORE_RETIRED.L2_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -2219,7 +2138,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
index 25a2b9695135..a38db3e253f2 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Intermediate bypass Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the intermediate bypass.",
"UMask": "0x2",
@@ -10,8 +12,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Not Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
"UMask": "0x4",
@@ -19,8 +23,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the full bypass.",
"UMask": "0x1",
@@ -28,6 +34,7 @@
},
{
"BriefDescription": "CHA Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_CHA_CLOCKTICKS",
"PerPkg": "1",
@@ -36,6 +43,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_CHA_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -43,8 +51,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xf2",
@@ -52,8 +62,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Any Single Snoop : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xf1",
@@ -61,8 +73,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x42",
@@ -70,8 +84,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x41",
@@ -79,8 +95,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x82",
@@ -88,8 +106,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x81",
@@ -97,8 +117,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x22",
@@ -106,8 +128,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x21",
@@ -115,8 +139,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.REMOTE_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x12",
@@ -124,8 +150,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.REMOTE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x11",
@@ -133,96 +161,120 @@
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6e",
"EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6e",
"EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6e",
"EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6d",
"EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed.",
"UMask": "0x2",
@@ -230,8 +282,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed.",
"UMask": "0x1",
@@ -239,6 +293,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.HA",
"PerPkg": "1",
@@ -248,6 +303,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.TOR",
"PerPkg": "1",
@@ -257,8 +313,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -266,8 +324,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -275,8 +335,10 @@
},
{
"BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*).",
"UMask": "0x1",
@@ -284,80 +346,100 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : op is WbMtoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache : op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed : op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed : op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : No SF/LLC HitS/F and op is RdInvOwn",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache : SF/LLC HitS/F and op is RdInvOwn",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Deallocate HitME$ on Reads without RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request : Received RspFwdI* for a local request, but converted HitME$ to SF entry",
"UMask": "0x1",
@@ -365,16 +447,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request : Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
"UMask": "0x2",
@@ -382,14 +468,17 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache to SHARed",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
"PerPkg": "1",
@@ -399,8 +488,10 @@
},
{
"BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "HA to iMC Reads Issued : ISOCH : Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
"UMask": "0x2",
@@ -408,6 +499,7 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
"PerPkg": "1",
@@ -417,8 +509,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x4",
@@ -426,8 +520,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x2",
@@ -435,8 +531,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x8",
@@ -444,8 +542,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x1fffff",
@@ -453,8 +553,10 @@
},
{
"BriefDescription": "Cache Lookups : All transactions from Remote Agents",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All transactions from Remote Agents : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x17e0ff",
@@ -462,16 +564,20 @@
},
{
"BriefDescription": "Cache Lookups : All Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local or remote transaction to the LLC, including prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : CRd Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1bd0ff",
@@ -479,24 +585,30 @@
},
{
"BriefDescription": "Cache Lookups : CRd Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Local non-prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Local non-prefetch requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local transaction to the LLC, not including prefetch",
"Unit": "CHA"
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1bc1ff",
@@ -504,8 +616,10 @@
},
{
"BriefDescription": "Cache Lookups : Data Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1fc1ff",
@@ -513,16 +627,20 @@
},
{
"BriefDescription": "Cache Lookups : Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Request : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Read transactions.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x841ff",
@@ -530,8 +648,10 @@
},
{
"BriefDescription": "Cache Lookups : Data Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1fc101",
@@ -539,8 +659,10 @@
},
{
"BriefDescription": "Cache Lookups : E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Exclusive State",
"UMask": "0x20",
@@ -548,8 +670,10 @@
},
{
"BriefDescription": "Cache Lookups : F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : F State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Forward State",
"UMask": "0x80",
@@ -557,8 +681,10 @@
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1a44ff",
@@ -566,16 +692,20 @@
},
{
"BriefDescription": "Cache Lookups : Flush",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : I State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Miss",
"UMask": "0x1",
@@ -583,16 +713,20 @@
},
{
"BriefDescription": "Cache Lookups : Local LLC prefetch requests (from LLC)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local LLC prefetch to the LLC",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Transactions homed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
"UMask": "0xbdfff",
@@ -600,8 +734,10 @@
},
{
"BriefDescription": "Cache Lookups : CRd Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x19d0ff",
@@ -609,8 +745,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x19c1ff",
@@ -618,8 +756,10 @@
},
{
"BriefDescription": "Cache Lookups : Demand CRd Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1850ff",
@@ -627,8 +767,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Demand Data Reads that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1841ff",
@@ -636,8 +778,10 @@
},
{
"BriefDescription": "Cache Lookups : Demand RFO Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1848ff",
@@ -645,16 +789,20 @@
},
{
"BriefDescription": "Cache Lookups : Transactions homed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_FLUSH_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1844ff",
@@ -662,8 +810,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Prefetch requests to the LLC that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x189dff",
@@ -671,8 +821,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Prefetches that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x199dff",
@@ -680,8 +832,10 @@
},
{
"BriefDescription": "Cache Lookups : CRd Prefetches that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1910ff",
@@ -689,8 +843,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Prefetches that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1981ff",
@@ -698,8 +854,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Prefetches that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1908ff",
@@ -707,8 +865,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x19c8ff",
@@ -716,8 +876,10 @@
},
{
"BriefDescription": "Cache Lookups : M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : M State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Modified State",
"UMask": "0x40",
@@ -725,8 +887,10 @@
},
{
"BriefDescription": "Cache Lookups : All Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1fe001",
@@ -734,24 +898,30 @@
},
{
"BriefDescription": "Cache Lookups : Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Remote non-snoop requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.PREF_OR_DMND_REMOTE_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remote non-snoop requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Remote non-snoop transactions to the LLC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
"UMask": "0x15dfff",
@@ -759,8 +929,10 @@
},
{
"BriefDescription": "Cache Lookups : CRd Requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"UMask": "0x1a10ff",
@@ -768,8 +940,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Requests that come from a Remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1a01ff",
@@ -777,16 +951,20 @@
},
{
"BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_FLUSH_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1a04ff",
@@ -794,8 +972,10 @@
},
{
"BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache that come from a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_OTHER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
"UMask": "0x1a02ff",
@@ -803,8 +983,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Requests that come from a Remote socket.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1a08ff",
@@ -812,16 +994,20 @@
},
{
"BriefDescription": "Cache Lookups : Remote snoop requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remote snoop requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Remote snoop transactions to the LLC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Snoop Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x1c19ff",
@@ -829,8 +1015,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1bc8ff",
@@ -838,16 +1026,20 @@
},
{
"BriefDescription": "Cache Lookups : RFO Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x9c8ff",
@@ -855,8 +1047,10 @@
},
{
"BriefDescription": "Cache Lookups : S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Shared State",
"UMask": "0x10",
@@ -864,8 +1058,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit Exclusive State",
"UMask": "0x4",
@@ -873,8 +1069,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - H State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit HitMe State",
"UMask": "0x8",
@@ -882,8 +1080,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit Shared State",
"UMask": "0x2",
@@ -891,8 +1091,10 @@
},
{
"BriefDescription": "Cache Lookups : Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Requests that install or change a line in the LLC. Examples: Writebacks from Core L2's and UPI. Prefetches into the LLC.",
"UMask": "0x842ff",
@@ -900,8 +1102,10 @@
},
{
"BriefDescription": "Cache Lookups : Remote Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x17c2ff",
@@ -909,8 +1113,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in E state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2",
@@ -918,8 +1124,10 @@
},
{
"BriefDescription": "Lines Victimized : IA traffic",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x20",
@@ -927,8 +1135,10 @@
},
{
"BriefDescription": "Lines Victimized : IO traffic",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x10",
@@ -936,8 +1146,10 @@
},
{
"BriefDescription": "All LLC lines in E state that are victimized on a fill from an IO device",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x12",
@@ -945,8 +1157,10 @@
},
{
"BriefDescription": "All LLC lines in F or S state that are victimized on a fill from an IO device",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO_FS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1c",
@@ -954,8 +1168,10 @@
},
{
"BriefDescription": "All LLC lines in M state that are victimized on a fill from an IO device",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x11",
@@ -963,8 +1179,10 @@
},
{
"BriefDescription": "All LLC lines in any state that are victimized on a fill from an IO device",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO_MESF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1f",
@@ -972,8 +1190,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x200f",
@@ -981,8 +1201,10 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2002",
@@ -990,8 +1212,10 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2001",
@@ -999,16 +1223,20 @@
},
{
"BriefDescription": "Lines Victimized : Local Only",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2004",
@@ -1016,8 +1244,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in M state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
@@ -1025,8 +1255,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x800f",
@@ -1034,8 +1266,10 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8002",
@@ -1043,8 +1277,10 @@
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8001",
@@ -1052,16 +1288,20 @@
},
{
"BriefDescription": "Lines Victimized : Remote Only",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8004",
@@ -1069,8 +1309,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x4",
@@ -1078,8 +1320,10 @@
},
{
"BriefDescription": "All LLC lines in E state that are victimized on a fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2",
@@ -1087,8 +1331,10 @@
},
{
"BriefDescription": "All LLC lines in M state that are victimized on a fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
@@ -1096,8 +1342,10 @@
},
{
"BriefDescription": "All LLC lines in S state that are victimized on a fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x4",
@@ -1105,8 +1353,10 @@
},
{
"BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : CV0 Prefetch Miss : Miscellaneous events in the Cbo.",
"UMask": "0x20",
@@ -1114,8 +1364,10 @@
},
{
"BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : CV0 Prefetch Victim : Miscellaneous events in the Cbo.",
"UMask": "0x10",
@@ -1123,8 +1375,10 @@
},
{
"BriefDescription": "Number of times that an RFO hit in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
"UMask": "0x8",
@@ -1132,8 +1386,10 @@
},
{
"BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : Silent Snoop Eviction : Miscellaneous events in the Cbo. : Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
"UMask": "0x1",
@@ -1141,8 +1397,10 @@
},
{
"BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : Write Combining Aliasing : Miscellaneous events in the Cbo. : Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
"UMask": "0x2",
@@ -1150,8 +1408,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Local InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x1",
@@ -1159,8 +1419,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Local Rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x2",
@@ -1168,8 +1430,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Off",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x20",
@@ -1177,8 +1441,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Remote Rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x4",
@@ -1186,8 +1452,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Remote Rd InvItoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.REMOTE_READINVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : Remote Rd InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x8",
@@ -1195,8 +1463,10 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"UMask": "0x10",
@@ -1204,32 +1474,40 @@
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near Memory set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Near Memory evictions due to another read to the same Near Memory set in the LLC.",
"UMask": "0x2",
@@ -1237,8 +1515,10 @@
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near memory set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Near Memory evictions due to another read to the same Near Memory set in the SF",
"UMask": "0x1",
@@ -1246,8 +1526,10 @@
},
{
"BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near Memory set conflict in TOR",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Reject in the CHA due to a pending read to the same Near Memory set in the TOR.",
"UMask": "0x4",
@@ -1255,88 +1537,110 @@
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x66",
"EventName": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": count # of FAST TOR Request inserted to ha_tor_req_fifo",
"UMask": "0x2",
@@ -1344,16 +1648,20 @@
},
{
"BriefDescription": "Number of SLOW TOR Request inserted to ha_pmm_tor_req_fifo",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC0 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -1361,8 +1669,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC1 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -1370,8 +1680,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC2 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -1379,8 +1691,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC3 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -1388,8 +1702,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC4 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -1397,8 +1713,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC5 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -1406,8 +1724,10 @@
},
{
"BriefDescription": "Requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
"UMask": "0x30",
@@ -1415,6 +1735,7 @@
},
{
"BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -1424,6 +1745,7 @@
},
{
"BriefDescription": "Remote requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -1433,6 +1755,7 @@
},
{
"BriefDescription": "Read requests made into the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS",
"PerPkg": "1",
@@ -1442,6 +1765,7 @@
},
{
"BriefDescription": "Read requests from a unit on this socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -1451,6 +1775,7 @@
},
{
"BriefDescription": "Read requests from a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -1460,6 +1785,7 @@
},
{
"BriefDescription": "Write requests made into the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES",
"PerPkg": "1",
@@ -1469,6 +1795,7 @@
},
{
"BriefDescription": "Write Requests from a unit on this socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -1478,6 +1805,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -1487,8 +1815,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IPQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x4",
@@ -1496,8 +1826,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x1",
@@ -1505,8 +1837,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IRQ Rejected : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x2",
@@ -1514,8 +1848,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x10",
@@ -1523,8 +1859,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x20",
@@ -1532,8 +1870,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : RRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : RRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x40",
@@ -1541,8 +1881,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : WBQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : WBQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x80",
@@ -1550,8 +1892,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -1559,8 +1903,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -1568,8 +1914,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -1577,8 +1925,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -1586,8 +1936,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -1595,8 +1947,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -1604,8 +1958,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -1613,8 +1969,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -1622,16 +1980,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IPQ0 Reject counter was true",
"UMask": "0x1",
@@ -1639,16 +2001,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -1656,16 +2022,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -1673,8 +2043,10 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -1682,16 +2054,20 @@
},
{
"BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -1699,8 +2075,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -1708,8 +2086,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -1717,8 +2097,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -1726,8 +2108,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -1735,8 +2119,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -1744,8 +2130,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -1753,8 +2141,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -1762,16 +2152,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IRQ0 Reject counter was true",
"UMask": "0x1",
@@ -1779,16 +2173,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -1796,24 +2194,30 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -1821,16 +2225,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -1838,8 +2246,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -1847,8 +2257,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
"UMask": "0x40",
@@ -1856,8 +2268,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -1865,8 +2279,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -1874,8 +2290,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -1883,8 +2301,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -1892,8 +2312,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
"UMask": "0x80",
@@ -1901,8 +2323,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -1910,8 +2334,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -1919,8 +2345,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
"UMask": "0x40",
@@ -1928,8 +2356,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -1937,8 +2367,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -1946,8 +2378,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -1955,8 +2389,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -1964,8 +2400,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
"UMask": "0x80",
@@ -1973,8 +2411,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
"UMask": "0x1",
@@ -1982,8 +2422,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -1991,8 +2433,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
"UMask": "0x1",
@@ -2000,8 +2444,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2009,8 +2455,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : IPQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x4",
@@ -2018,8 +2466,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : RRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : RRQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x40",
@@ -2027,8 +2477,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : WBQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : WBQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x80",
@@ -2036,8 +2488,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : AD REQ on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2045,8 +2499,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : AD RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2054,8 +2510,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : Non UPI AK Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject AK ring message",
"UMask": "0x40",
@@ -2063,8 +2521,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL NCB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2072,8 +2532,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL NCS on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2081,8 +2543,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2090,8 +2554,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL WB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2099,8 +2565,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : Non UPI IV Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject IV ring message",
"UMask": "0x80",
@@ -2108,8 +2576,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : Allow Snoop : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x40",
@@ -2117,8 +2587,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : ANY0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Any condition listed in the Other0 Reject counter was true",
"UMask": "0x1",
@@ -2126,8 +2598,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : HA : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x2",
@@ -2135,8 +2609,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : LLC OR SF Way : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2144,8 +2620,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : LLC Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x4",
@@ -2153,8 +2631,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : PhyAddr Match : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2162,8 +2642,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : SF Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2171,8 +2653,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x10",
@@ -2180,8 +2664,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2189,8 +2675,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2198,8 +2686,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -2207,8 +2697,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2216,8 +2708,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2225,8 +2719,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2234,8 +2730,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2243,8 +2741,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -2252,16 +2752,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the PRQ0 Reject counter was true",
"UMask": "0x1",
@@ -2269,16 +2773,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2286,16 +2794,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2303,8 +2815,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2312,16 +2826,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : AD REQ on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2329,8 +2847,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : AD RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2338,8 +2858,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : Non UPI AK Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject AK ring message",
"UMask": "0x40",
@@ -2347,8 +2869,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL NCB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2356,8 +2880,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL NCS on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2365,8 +2891,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2374,8 +2902,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL WB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2383,8 +2913,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : Non UPI IV Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject IV ring message",
"UMask": "0x80",
@@ -2392,8 +2924,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : Allow Snoop : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x40",
@@ -2401,8 +2935,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : ANY0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Any condition listed in the WBQ0 Reject counter was true",
"UMask": "0x1",
@@ -2410,8 +2946,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : HA : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x2",
@@ -2419,8 +2957,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : LLC OR SF Way : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2428,8 +2968,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : LLC Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x4",
@@ -2437,8 +2979,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : PhyAddr Match : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2446,8 +2990,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : SF Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2455,8 +3001,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x10",
@@ -2464,8 +3012,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2473,8 +3023,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2482,8 +3034,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject AK ring message",
"UMask": "0x40",
@@ -2491,8 +3045,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2500,8 +3056,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2509,8 +3067,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2518,8 +3078,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2527,8 +3089,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject IV ring message",
"UMask": "0x80",
@@ -2536,8 +3100,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x40",
@@ -2545,8 +3111,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Any condition listed in the RRQ0 Reject counter was true",
"UMask": "0x1",
@@ -2554,8 +3122,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : HA : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x2",
@@ -2563,8 +3133,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2572,8 +3144,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x4",
@@ -2581,8 +3155,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2590,8 +3166,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2599,8 +3177,10 @@
},
{
"BriefDescription": "RRQ Rejects - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RRQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x10",
@@ -2608,8 +3188,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2617,8 +3199,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2626,8 +3210,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject AK ring message",
"UMask": "0x40",
@@ -2635,8 +3221,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2644,8 +3232,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2653,8 +3243,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2662,8 +3254,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2671,8 +3265,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject IV ring message",
"UMask": "0x80",
@@ -2680,8 +3276,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x40",
@@ -2689,8 +3287,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Any condition listed in the WBQ0 Reject counter was true",
"UMask": "0x1",
@@ -2698,8 +3298,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : HA : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x2",
@@ -2707,8 +3309,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2716,8 +3320,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x4",
@@ -2725,8 +3331,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2734,8 +3342,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2743,8 +3353,10 @@
},
{
"BriefDescription": "WBQ Rejects - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WBQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x10",
@@ -2752,8 +3364,10 @@
},
{
"BriefDescription": "Snoops Sent : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : All : Counts the number of snoops issued by the HA.",
"UMask": "0x1",
@@ -2761,8 +3375,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast snoop for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast snoop for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from local sockets.",
"UMask": "0x10",
@@ -2770,8 +3386,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA.This filter includes only requests coming from remote sockets.",
"UMask": "0x20",
@@ -2779,8 +3397,10 @@
},
{
"BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Directed snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA. This filter includes only requests coming from local sockets.",
"UMask": "0x40",
@@ -2788,8 +3408,10 @@
},
{
"BriefDescription": "Snoops Sent : Directed snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Directed snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA. This filter includes only requests coming from remote sockets.",
"UMask": "0x80",
@@ -2797,8 +3419,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast or directed Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast or directed Snoops sent for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the local socket.",
"UMask": "0x4",
@@ -2806,8 +3430,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast or directed Snoops sent for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast or directed Snoops sent for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the remote socket.",
"UMask": "0x8",
@@ -2815,8 +3441,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RSPCNFLCT*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : RSPCNFLCT* : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -2824,8 +3452,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : RspFwd : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -2833,8 +3463,10 @@
},
{
"BriefDescription": "Snoop Responses Received : Rsp*Fwd*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPFWDWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : Rsp*Fwd*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -2842,8 +3474,10 @@
},
{
"BriefDescription": "RspI Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
"UMask": "0x1",
@@ -2851,8 +3485,10 @@
},
{
"BriefDescription": "RspIFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
"UMask": "0x4",
@@ -2860,8 +3496,10 @@
},
{
"BriefDescription": "RspS Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a transaction with the opcode type RspS Snoop Response was received which indicates when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -2869,8 +3507,10 @@
},
{
"BriefDescription": "RspSFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
"UMask": "0x8",
@@ -2878,8 +3518,10 @@
},
{
"BriefDescription": "Snoop Responses Received : Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_CHA_SNOOP_RESP.RSPWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : Rsp*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -2887,8 +3529,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspCnflct : Number of snoop responses received for a Local request : Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -2896,8 +3540,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -2905,8 +3551,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : Rsp*FWD*WB : Number of snoop responses received for a Local request : Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -2914,8 +3562,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspI : Number of snoop responses received for a Local request : Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
"UMask": "0x1",
@@ -2923,8 +3573,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspIFwd : Number of snoop responses received for a Local request : Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
@@ -2932,8 +3584,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspS : Number of snoop responses received for a Local request : Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -2941,8 +3595,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspSFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
"UMask": "0x8",
@@ -2950,8 +3606,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : Rsp*WB : Number of snoop responses received for a Local request : Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -2959,56 +3617,70 @@
},
{
"BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "Counter": "0,1,2,3",
"EventCode": "0x6b",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0xc001ffff",
@@ -3016,16 +3688,20 @@
},
{
"BriefDescription": "TOR Inserts : DDR Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DDR Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : SF/LLC Evictions : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -3033,14 +3709,17 @@
},
{
"BriefDescription": "TOR Inserts : Just Hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Hits : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; All from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA",
"PerPkg": "1",
@@ -3050,6 +3729,7 @@
},
{
"BriefDescription": "TOR Inserts;CLFlush from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
"PerPkg": "1",
@@ -3059,8 +3739,10 @@
},
{
"BriefDescription": "TOR Inserts;CLFlushOpt from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; CLFlushOpt events that are initiated from the Core",
"UMask": "0xc8d7ff01",
@@ -3068,6 +3750,7 @@
},
{
"BriefDescription": "TOR Inserts; CRd from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
"PerPkg": "1",
@@ -3077,8 +3760,10 @@
},
{
"BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88fff01",
@@ -3086,6 +3771,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
"PerPkg": "1",
@@ -3095,8 +3781,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837ff01",
@@ -3104,8 +3792,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt from local IA that misses in the snoop filter",
"UMask": "0xc827ff01",
@@ -3113,8 +3803,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt Pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8a7ff01",
@@ -3122,6 +3814,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd Pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
"PerPkg": "1",
@@ -3131,6 +3824,7 @@
},
{
"BriefDescription": "TOR Inserts; Hits from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
"PerPkg": "1",
@@ -3140,6 +3834,7 @@
},
{
"BriefDescription": "TOR Inserts; CRd hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
"PerPkg": "1",
@@ -3149,6 +3844,7 @@
},
{
"BriefDescription": "TOR Inserts; CRd Pref hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -3158,16 +3854,20 @@
},
{
"BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c0018101",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c0008101",
@@ -3175,6 +3875,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
"PerPkg": "1",
@@ -3184,8 +3885,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to page walks that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fd01",
@@ -3193,8 +3896,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt from local IA that hits in the snoop filter",
"UMask": "0xc827fd01",
@@ -3202,8 +3907,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt Pref hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that hits in the snoop filter",
"UMask": "0xc8a7fd01",
@@ -3211,6 +3918,7 @@
},
{
"BriefDescription": "TOR Inserts; DRd Pref hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
"PerPkg": "1",
@@ -3220,8 +3928,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fd01",
@@ -3229,8 +3939,10 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefCode hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA that hits in the snoop filter",
"UMask": "0xcccffd01",
@@ -3238,8 +3950,10 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefData hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Last level cache prefetch data read from local IA that hits in the snoop filter",
"UMask": "0xccd7fd01",
@@ -3247,6 +3961,7 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefRFO hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -3256,6 +3971,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
"PerPkg": "1",
@@ -3265,6 +3981,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO Pref hits from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -3274,8 +3991,10 @@
},
{
"BriefDescription": "TOR Inserts;ItoM from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; ItoM events that are initiated from the Core",
"UMask": "0xcc47ff01",
@@ -3283,8 +4002,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd47ff01",
@@ -3292,8 +4013,10 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefCode from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA.",
"UMask": "0xcccfff01",
@@ -3301,6 +4024,7 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefData from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -3310,6 +4034,7 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefRFO from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -3319,6 +4044,7 @@
},
{
"BriefDescription": "TOR Inserts; misses from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
"PerPkg": "1",
@@ -3328,6 +4054,7 @@
},
{
"BriefDescription": "TOR Inserts for CRd misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
"PerPkg": "1",
@@ -3337,16 +4064,20 @@
},
{
"BriefDescription": "CRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRDMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c80b8201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80efe01",
@@ -3354,6 +4085,7 @@
},
{
"BriefDescription": "TOR Inserts; CRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
"PerPkg": "1",
@@ -3363,8 +4095,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88efe01",
@@ -3372,8 +4106,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88f7e01",
@@ -3381,8 +4117,10 @@
},
{
"BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80f7e01",
@@ -3390,16 +4128,20 @@
},
{
"BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c0018201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c0008201",
@@ -3407,6 +4149,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
"PerPkg": "1",
@@ -3416,16 +4159,20 @@
},
{
"BriefDescription": "DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8138201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fe01",
@@ -3433,16 +4180,20 @@
},
{
"BriefDescription": "DRds issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8178201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8168201",
@@ -3450,6 +4201,7 @@
},
{
"BriefDescription": "TOR Inserts for DRds issued by IA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
"PerPkg": "1",
@@ -3459,6 +4211,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd misses from local IA targeting local memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
"PerPkg": "1",
@@ -3468,6 +4221,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
"PerPkg": "1",
@@ -3477,6 +4231,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
"PerPkg": "1",
@@ -3486,8 +4241,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt from local IA that misses in the snoop filter",
"UMask": "0xc827fe01",
@@ -3495,8 +4252,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8268201",
@@ -3504,8 +4263,10 @@
},
{
"BriefDescription": "TOR Inserts; DRd Opt Pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8a7fe01",
@@ -3513,8 +4274,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8a68201",
@@ -3522,6 +4285,7 @@
},
{
"BriefDescription": "TOR Inserts for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
"PerPkg": "1",
@@ -3531,6 +4295,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
"PerPkg": "1",
@@ -3540,16 +4305,20 @@
},
{
"BriefDescription": "L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8978201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8968201",
@@ -3557,8 +4326,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978601",
@@ -3566,6 +4337,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting local memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
"PerPkg": "1",
@@ -3575,8 +4347,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968601",
@@ -3584,8 +4358,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968a01",
@@ -3593,8 +4369,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978a01",
@@ -3602,6 +4380,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting remote memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
"PerPkg": "1",
@@ -3611,8 +4390,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970601",
@@ -3620,8 +4401,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970a01",
@@ -3629,6 +4412,7 @@
},
{
"BriefDescription": "TOR Inserts for DRd misses from local IA targeting remote memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
"PerPkg": "1",
@@ -3638,6 +4422,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
"PerPkg": "1",
@@ -3647,6 +4432,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
"PerPkg": "1",
@@ -3656,8 +4442,10 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fe01",
@@ -3665,8 +4453,10 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefCode misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA that misses in the snoop filter",
"UMask": "0xcccffe01",
@@ -3674,14 +4464,17 @@
},
{
"BriefDescription": "LLC Prefetch Code transactions issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10cccf8201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; LLCPrefData misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -3691,16 +4484,20 @@
},
{
"BriefDescription": "LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10ccd78201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10ccd68201",
@@ -3708,6 +4505,7 @@
},
{
"BriefDescription": "TOR Inserts; LLCPrefRFO misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -3717,16 +4515,20 @@
},
{
"BriefDescription": "L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8878201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8868201",
@@ -3734,8 +4536,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668601",
@@ -3743,8 +4547,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668a01",
@@ -3752,8 +4558,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8601",
@@ -3761,8 +4569,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8a01",
@@ -3770,8 +4580,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670601",
@@ -3779,8 +4591,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670a01",
@@ -3788,8 +4602,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0601",
@@ -3797,8 +4613,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0a01",
@@ -3806,6 +4624,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
"PerPkg": "1",
@@ -3815,24 +4634,30 @@
},
{
"BriefDescription": "RFO and L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFOMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8038201",
"Unit": "CHA"
},
{
"BriefDescription": "RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8078201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8068201",
@@ -3840,6 +4665,7 @@
},
{
"BriefDescription": "TOR Inserts RFO misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -3849,6 +4675,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO pref misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -3858,16 +4685,20 @@
},
{
"BriefDescription": "LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10ccc78201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10ccc68201",
@@ -3875,6 +4706,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -3884,6 +4716,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
"PerPkg": "1",
@@ -3893,6 +4726,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
"PerPkg": "1",
@@ -3902,8 +4736,10 @@
},
{
"BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc877de01",
@@ -3911,8 +4747,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86ffe01",
@@ -3920,8 +4758,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867fe01",
@@ -3929,8 +4769,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678601",
@@ -3938,8 +4780,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678a01",
@@ -3947,8 +4791,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8601",
@@ -3956,8 +4802,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8a01",
@@ -3965,8 +4813,10 @@
},
{
"BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc87fde01",
@@ -3974,6 +4824,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
"PerPkg": "1",
@@ -3983,6 +4834,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
"PerPkg": "1",
@@ -3992,6 +4844,7 @@
},
{
"BriefDescription": "TOR Inserts;SpecItoM from Local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
"PerPkg": "1",
@@ -4001,8 +4854,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc3fff01",
@@ -4010,8 +4865,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc37ff01",
@@ -4019,8 +4876,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc2fff01",
@@ -4028,8 +4887,10 @@
},
{
"BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbMtoIs issued by iA Cores . (Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc27ff01",
@@ -4037,8 +4898,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc67ff01",
@@ -4046,8 +4909,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86fff01",
@@ -4055,8 +4920,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867ff01",
@@ -4064,6 +4931,7 @@
},
{
"BriefDescription": "TOR Inserts; All from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO",
"PerPkg": "1",
@@ -4073,6 +4941,7 @@
},
{
"BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
"PerPkg": "1",
@@ -4082,6 +4951,7 @@
},
{
"BriefDescription": "TOR Inserts; Hits from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
"PerPkg": "1",
@@ -4091,6 +4961,7 @@
},
{
"BriefDescription": "TOR Inserts; ItoM hits from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
"PerPkg": "1",
@@ -4100,6 +4971,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4109,6 +4981,7 @@
},
{
"BriefDescription": "TOR Inserts; RdCur and FsRdCur hits from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -4118,6 +4991,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO hits from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
"PerPkg": "1",
@@ -4127,6 +5001,7 @@
},
{
"BriefDescription": "TOR Inserts for ItoM from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
"PerPkg": "1",
@@ -4136,6 +5011,7 @@
},
{
"BriefDescription": "TOR Inserts for ItoMCacheNears from IO devices.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4145,6 +5021,7 @@
},
{
"BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL",
"PerPkg": "1",
@@ -4154,6 +5031,7 @@
},
{
"BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE",
"PerPkg": "1",
@@ -4163,6 +5041,7 @@
},
{
"BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL",
"PerPkg": "1",
@@ -4172,6 +5051,7 @@
},
{
"BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
"PerPkg": "1",
@@ -4181,6 +5061,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
"PerPkg": "1",
@@ -4190,6 +5071,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoM, indicating a full cacheline write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
"PerPkg": "1",
@@ -4199,6 +5081,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4208,6 +5091,7 @@
},
{
"BriefDescription": "TOR Inserts; RdCur and FsRdCur requests from local IO that miss LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -4217,6 +5101,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO misses from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
"PerPkg": "1",
@@ -4226,6 +5111,7 @@
},
{
"BriefDescription": "TOR Inserts for RdCur from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
"PerPkg": "1",
@@ -4235,6 +5121,7 @@
},
{
"BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL",
"PerPkg": "1",
@@ -4244,6 +5131,7 @@
},
{
"BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE",
"PerPkg": "1",
@@ -4253,6 +5141,7 @@
},
{
"BriefDescription": "TOR Inserts; RFO from local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
"PerPkg": "1",
@@ -4262,6 +5151,7 @@
},
{
"BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
"PerPkg": "1",
@@ -4271,8 +5161,10 @@
},
{
"BriefDescription": "TOR Inserts : IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IPQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x8",
@@ -4280,8 +5172,10 @@
},
{
"BriefDescription": "TOR Inserts : IRQ - iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IRQ - iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : From an iA Core",
"UMask": "0x1",
@@ -4289,8 +5183,10 @@
},
{
"BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IRQ - Non iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x10",
@@ -4298,24 +5194,30 @@
},
{
"BriefDescription": "TOR Inserts : Just ISOC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just ISOC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just Local Targets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Local Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA and IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally initiated requests",
"UMask": "0xc000ff05",
@@ -4323,8 +5225,10 @@
},
{
"BriefDescription": "TOR Inserts : All from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally initiated requests from iA Cores",
"UMask": "0xc000ff01",
@@ -4332,8 +5236,10 @@
},
{
"BriefDescription": "TOR Inserts : All from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally generated IO traffic",
"UMask": "0xc000ff04",
@@ -4341,80 +5247,100 @@
},
{
"BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Misses : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : MMCFG Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : MMCFG Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : MMIO Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MMIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : MMIO Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NearMem",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NonCoherent",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NonCoherent : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NotNearMem",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NotNearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : PMM Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PM Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PRQ - IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : From a PCIe Device",
"UMask": "0x4",
@@ -4422,8 +5348,10 @@
},
{
"BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PRQ - Non IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x20",
@@ -4431,16 +5359,20 @@
},
{
"BriefDescription": "TOR Inserts : Just Remote Targets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REMOTE_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Remote Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Remote : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All remote requests (e.g. snoops, writebacks) that came from remote sockets",
"UMask": "0xc001ffc8",
@@ -4448,8 +5380,10 @@
},
{
"BriefDescription": "TOR Inserts : All Snoops from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All Snoops from Remote : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All snoops to this LLC that came from remote sockets",
"UMask": "0xc001ff08",
@@ -4457,8 +5391,10 @@
},
{
"BriefDescription": "TOR Inserts : RRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RRQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x40",
@@ -4466,8 +5402,10 @@
},
{
"BriefDescription": "TOR Inserts; All Snoops from Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.SNPS_FROM_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. All snoops to this LLC that came from remote sockets.",
"UMask": "0xc001ff08",
@@ -4475,8 +5413,10 @@
},
{
"BriefDescription": "TOR Inserts : WBQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WBQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x80",
@@ -4484,8 +5424,10 @@
},
{
"BriefDescription": "TOR Occupancy : All",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0xc001ffff",
@@ -4493,16 +5435,20 @@
},
{
"BriefDescription": "TOR Occupancy : DDR Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DDR Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : SF/LLC Evictions : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -4510,14 +5456,17 @@
},
{
"BriefDescription": "TOR Occupancy : Just Hits",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Hits : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; All from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
"PerPkg": "1",
@@ -4527,6 +5476,7 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
"PerPkg": "1",
@@ -4536,8 +5486,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8d7ff01",
@@ -4545,6 +5497,7 @@
},
{
"BriefDescription": "TOR Occupancy; CRd from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
"PerPkg": "1",
@@ -4554,8 +5507,10 @@
},
{
"BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88fff01",
@@ -4563,6 +5518,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
"PerPkg": "1",
@@ -4572,8 +5528,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -4583,8 +5541,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt from local IA that misses in the snoop filter",
"UMask": "0xc827ff01",
@@ -4592,8 +5552,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt Pref from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8a7ff01",
@@ -4601,6 +5563,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
"PerPkg": "1",
@@ -4610,6 +5573,7 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
"PerPkg": "1",
@@ -4619,6 +5583,7 @@
},
{
"BriefDescription": "TOR Occupancy; CRd hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
"PerPkg": "1",
@@ -4628,6 +5593,7 @@
},
{
"BriefDescription": "TOR Occupancy; CRd Pref hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -4637,16 +5603,20 @@
},
{
"BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c0018101",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c0008101",
@@ -4654,6 +5624,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
"PerPkg": "1",
@@ -4663,8 +5634,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -4674,8 +5647,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt from local IA that hits in the snoop filter",
"UMask": "0xc827fd01",
@@ -4683,8 +5658,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt Pref hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that hits in the snoop filter",
"UMask": "0xc8a7fd01",
@@ -4692,6 +5669,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
"PerPkg": "1",
@@ -4701,8 +5679,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fd01",
@@ -4710,8 +5690,10 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefCode hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Last level cache prefetch code read from local IA that hits in the snoop filter",
"UMask": "0xcccffd01",
@@ -4719,8 +5701,10 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefData hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA that hits in the snoop filter",
"UMask": "0xccd7fd01",
@@ -4728,6 +5712,7 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefRFO hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -4737,6 +5722,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
"PerPkg": "1",
@@ -4746,6 +5732,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO Pref hits from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -4755,8 +5742,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47ff01",
@@ -4764,8 +5753,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd47ff01",
@@ -4773,8 +5764,10 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefCode from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA.",
"UMask": "0xcccfff01",
@@ -4782,6 +5775,7 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefData from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -4791,6 +5785,7 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefRFO from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -4800,6 +5795,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from Local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
"PerPkg": "1",
@@ -4809,6 +5805,7 @@
},
{
"BriefDescription": "TOR Occupancy; CRd misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
"PerPkg": "1",
@@ -4818,16 +5815,20 @@
},
{
"BriefDescription": "TOR Occupancy for CRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRDMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c80b8201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80efe01",
@@ -4835,8 +5836,10 @@
},
{
"BriefDescription": "TOR Occupancy; CRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88ffe01",
@@ -4844,8 +5847,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88efe01",
@@ -4853,8 +5858,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88f7e01",
@@ -4862,8 +5869,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80f7e01",
@@ -4871,16 +5880,20 @@
},
{
"BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c0018201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c0008201",
@@ -4888,6 +5901,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRd misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
"PerPkg": "1",
@@ -4897,16 +5911,20 @@
},
{
"BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8138201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -4916,16 +5934,20 @@
},
{
"BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8178201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8168201",
@@ -4933,6 +5955,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
"PerPkg": "1",
@@ -4942,6 +5965,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRd misses from local IA targeting local memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
"PerPkg": "1",
@@ -4951,6 +5975,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
"PerPkg": "1",
@@ -4960,6 +5985,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
"PerPkg": "1",
@@ -4969,8 +5995,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt from local IA that misses in the snoop filter",
"UMask": "0xc827fe01",
@@ -4978,8 +6006,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8268201",
@@ -4987,8 +6017,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Opt Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that misses in the snoop filter",
"UMask": "0xc8a7fe01",
@@ -4996,8 +6028,10 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8a68201",
@@ -5005,6 +6039,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
"PerPkg": "1",
@@ -5014,6 +6049,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
"PerPkg": "1",
@@ -5023,16 +6059,20 @@
},
{
"BriefDescription": "TOR Occupancy for L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8978201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8968201",
@@ -5040,8 +6080,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978601",
@@ -5049,8 +6091,10 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc896fe01",
@@ -5058,8 +6102,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968601",
@@ -5067,8 +6113,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8968a01",
@@ -5076,8 +6124,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8978a01",
@@ -5085,6 +6135,7 @@
},
{
"BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
"PerPkg": "1",
@@ -5094,8 +6145,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970601",
@@ -5103,8 +6156,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8970a01",
@@ -5112,6 +6167,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRd misses from local IA targeting remote memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
"PerPkg": "1",
@@ -5121,6 +6177,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
"PerPkg": "1",
@@ -5130,6 +6187,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
"PerPkg": "1",
@@ -5139,8 +6197,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc47fe01",
@@ -5148,8 +6208,10 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefCode misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Last level cache prefetch code read from local IA that misses in the snoop filter",
"UMask": "0xcccffe01",
@@ -5157,14 +6219,17 @@
},
{
"BriefDescription": "TOR Occupancy for LLC Prefetch Code transactions issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10cccf8201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; LLCPrefData misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -5174,16 +6239,20 @@
},
{
"BriefDescription": "TOR Occupancy for LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10ccd78201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10ccd68201",
@@ -5191,6 +6260,7 @@
},
{
"BriefDescription": "TOR Occupancy; LLCPrefRFO misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -5200,16 +6270,20 @@
},
{
"BriefDescription": "TOR Occupancy for L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8878201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8868201",
@@ -5217,8 +6291,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668601",
@@ -5226,8 +6302,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8668a01",
@@ -5235,8 +6313,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8601",
@@ -5244,8 +6324,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86e8a01",
@@ -5253,8 +6335,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670601",
@@ -5262,8 +6346,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8670a01",
@@ -5271,8 +6357,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0601",
@@ -5280,8 +6368,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f0a01",
@@ -5289,6 +6379,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
"PerPkg": "1",
@@ -5298,24 +6389,30 @@
},
{
"BriefDescription": "TOR Occupancy for RFO and L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFOMORPH_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8038201",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c8078201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10c8068201",
@@ -5323,6 +6420,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -5332,6 +6430,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -5341,16 +6440,20 @@
},
{
"BriefDescription": "TOR Occupancy for LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10ccc78201",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10ccc68201",
@@ -5358,6 +6461,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -5367,6 +6471,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
"PerPkg": "1",
@@ -5376,6 +6481,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
"PerPkg": "1",
@@ -5385,8 +6491,10 @@
},
{
"BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc877de01",
@@ -5394,8 +6502,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86ffe01",
@@ -5403,8 +6513,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867fe01",
@@ -5412,8 +6524,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678601",
@@ -5421,8 +6535,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8678a01",
@@ -5430,8 +6546,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8601",
@@ -5439,8 +6557,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86f8a01",
@@ -5448,8 +6568,10 @@
},
{
"BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc87fde01",
@@ -5457,6 +6579,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
"PerPkg": "1",
@@ -5466,6 +6589,7 @@
},
{
"BriefDescription": "TOR Occupancy; RFO prefetch from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
"PerPkg": "1",
@@ -5475,6 +6599,7 @@
},
{
"BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
"PerPkg": "1",
@@ -5484,8 +6609,10 @@
},
{
"BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc27ff01",
@@ -5493,8 +6620,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86fff01",
@@ -5502,8 +6631,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867ff01",
@@ -5511,6 +6642,7 @@
},
{
"BriefDescription": "TOR Occupancy; All from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
"PerPkg": "1",
@@ -5520,8 +6652,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8c3ff04",
@@ -5529,6 +6663,7 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
"PerPkg": "1",
@@ -5538,6 +6673,7 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM hits from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
"PerPkg": "1",
@@ -5547,6 +6683,7 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -5556,6 +6693,7 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur hits from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -5565,8 +6703,10 @@
},
{
"BriefDescription": "TOR Occupancy; RFO hits from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fd04",
@@ -5574,6 +6714,7 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
"PerPkg": "1",
@@ -5583,8 +6724,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -5594,6 +6737,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
"PerPkg": "1",
@@ -5603,6 +6747,7 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM misses from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
"PerPkg": "1",
@@ -5612,6 +6757,7 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -5621,8 +6767,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC and targets local memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd42fe04",
@@ -5630,8 +6778,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC and targets remote memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd437e04",
@@ -5639,8 +6789,10 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM misses from local IO and targets local memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc42fe04",
@@ -5648,8 +6800,10 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM misses from local IO and targets remote memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc437e04",
@@ -5657,6 +6811,7 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -5666,8 +6821,10 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO and targets local memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8f2fe04",
@@ -5675,8 +6832,10 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO and targets remote memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8f37e04",
@@ -5684,8 +6843,10 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fe04",
@@ -5693,6 +6854,7 @@
},
{
"BriefDescription": "TOR Occupancy; RdCur and FsRdCur from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
"PerPkg": "1",
@@ -5702,8 +6864,10 @@
},
{
"BriefDescription": "TOR Occupancy; ItoM from local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803ff04",
@@ -5711,8 +6875,10 @@
},
{
"BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc23ff04",
@@ -5720,8 +6886,10 @@
},
{
"BriefDescription": "TOR Occupancy : IPQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IPQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x8",
@@ -5729,8 +6897,10 @@
},
{
"BriefDescription": "TOR Occupancy : IRQ - iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IRQ - iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : From an iA Core",
"UMask": "0x1",
@@ -5738,8 +6908,10 @@
},
{
"BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IRQ - Non iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x10",
@@ -5747,24 +6919,30 @@
},
{
"BriefDescription": "TOR Occupancy : Just ISOC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just ISOC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just Local Targets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Local Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA and IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally initiated requests",
"UMask": "0xc000ff05",
@@ -5772,8 +6950,10 @@
},
{
"BriefDescription": "TOR Occupancy : All from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally initiated requests from iA Cores",
"UMask": "0xc000ff01",
@@ -5781,8 +6961,10 @@
},
{
"BriefDescription": "TOR Occupancy : All from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally generated IO traffic",
"UMask": "0xc000ff04",
@@ -5790,80 +6972,100 @@
},
{
"BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just Misses",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Misses : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : MMCFG Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : MMCFG Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : MMIO Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MMIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : MMIO Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NearMem",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NonCoherent : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NotNearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : PMM Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PMM Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PRQ - IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : From a PCIe Device",
"UMask": "0x4",
@@ -5871,8 +7073,10 @@
},
{
"BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PRQ - Non IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x20",
@@ -5880,16 +7084,20 @@
},
{
"BriefDescription": "TOR Occupancy : Just Remote Targets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REMOTE_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Remote Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All from Remote",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Remote : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All remote requests (e.g. snoops, writebacks) that came from remote sockets",
"UMask": "0xc001ffc8",
@@ -5897,8 +7105,10 @@
},
{
"BriefDescription": "TOR Occupancy : All Snoops from Remote",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All Snoops from Remote : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All snoops to this LLC that came from remote sockets",
"UMask": "0xc001ff08",
@@ -5906,8 +7116,10 @@
},
{
"BriefDescription": "TOR Occupancy : RRQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RRQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x40",
@@ -5915,8 +7127,10 @@
},
{
"BriefDescription": "TOR Occupancy; All Snoops from Remote",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.SNPS_FROM_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. All snoops to this LLC that came from remote sockets.",
"UMask": "0xc001ff08",
@@ -5924,8 +7138,10 @@
},
{
"BriefDescription": "TOR Occupancy : WBQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WBQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x80",
@@ -5933,8 +7149,10 @@
},
{
"BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbPushMtoI : Pushed to LLC : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was able to push WbPushMToI to LLC",
"UMask": "0x1",
@@ -5942,8 +7160,10 @@
},
{
"BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbPushMtoI : Pushed to Memory : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
"UMask": "0x2",
@@ -5951,8 +7171,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC0 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -5960,8 +7182,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC1 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -5969,8 +7193,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC2 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -5978,8 +7204,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC3 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -5987,8 +7215,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC4 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -5996,8 +7226,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "Counter": "0,1,2,3",
"EventCode": "0x5a",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC5 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -6005,8 +7237,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 0?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
"UMask": "0x8",
@@ -6014,8 +7248,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 0?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
"UMask": "0x4",
@@ -6023,8 +7259,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 1?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
"UMask": "0x80",
@@ -6032,8 +7270,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 1?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
"UMask": "0x40",
@@ -6041,8 +7281,10 @@
},
{
"BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Sent (on 0?) : Number of XPT prefetches sent",
"UMask": "0x1",
@@ -6050,8 +7292,10 @@
},
{
"BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Sent (on 1?) : Number of XPT prefetches sent",
"UMask": "0x10",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json
index f3e84fd88de3..ff81f3a6426a 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of lfclk ticks",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x01",
"EventName": "UNC_CXLCM_CLOCKTICKS",
"PerPkg": "1",
@@ -9,390 +10,487 @@
},
{
"BriefDescription": "Number of Allocation to Mem Rxx AGF 0",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req AGF0",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_REQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp AGF",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_REQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Data AGF",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_RSP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp AGF",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_RSP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req AGF 1",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Data AGF",
+ "Counter": "4,5,6,7",
"EventCode": "0x43",
"EventName": "UNC_CXLCM_RxC_AGF_INSERTS.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with AK set",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.AK_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with BE set",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.BE_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of control flits received",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.CTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Headerless flits received",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.NO_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of protocol flits received",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.PROT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with SZ set",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.SZ_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of flits received",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.VALID",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of valid messages in the flit",
+ "Counter": "4,5,6,7",
"EventCode": "0x4b",
"EventName": "UNC_CXLCM_RxC_FLITS.VALID_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of CRC errors detected",
+ "Counter": "4,5,6,7",
"EventCode": "0x40",
"EventName": "UNC_CXLCM_RxC_MISC.CRC_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Init flits sent",
+ "Counter": "4,5,6,7",
"EventCode": "0x40",
"EventName": "UNC_CXLCM_RxC_MISC.INIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of LLCRD flits sent",
+ "Counter": "4,5,6,7",
"EventCode": "0x40",
"EventName": "UNC_CXLCM_RxC_MISC.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Retry flits sent",
+ "Counter": "4,5,6,7",
"EventCode": "0x40",
"EventName": "UNC_CXLCM_RxC_MISC.RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "Counter": "4,5,6,7",
"EventCode": "0x52",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Data Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Rxx Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x41",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Cache Data Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Cache Req Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Cache Rsp Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Mem Data Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of cycles of Not Empty for Mem Rxx Packing buffer",
+ "Counter": "4,5,6,7",
"EventCode": "0x42",
"EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with AK set",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.AK_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with BE set",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.BE_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of control flits packed",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.CTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Headerless flits packed",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.NO_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of protocol flits packed",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.PROT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of Flits with SZ set",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.SZ_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CXLCM"
},
{
"BriefDescription": "Count the number of flits packed",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_CXLCM_TxC_FLITS.VALID",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Data Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_REQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp1 Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_REQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Rsp0 Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_RSP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_RSP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.MEM_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLCM"
},
{
"BriefDescription": "Number of Allocation to Mem Rxx Packing buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.MEM_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLCM"
},
{
"BriefDescription": "Counts the number of uclk ticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_CXLDP_CLOCKTICKS",
"PerPkg": "1",
@@ -401,48 +499,60 @@
},
{
"BriefDescription": "Number of Allocation to M2S Data AGF",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to M2S Req AGF",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to U2C Data AGF",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to U2C Req AGF",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to U2C Rsp AGF 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_RSP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CXLDP"
},
{
"BriefDescription": "Number of Allocation to U2C Rsp AGF 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_RSP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CXLDP"
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
index 22bb490e9666..8b1ae9540066 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
+ "Counter": "0,1",
"EventCode": "0x0f",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total IRP occupancy of inbound read and write requests to coherent memory. This is effectively the sum of read occupancy and write occupancy.",
"UMask": "0x4",
@@ -10,6 +12,7 @@
},
{
"BriefDescription": "IRP Clockticks",
+ "Counter": "0,1",
"EventCode": "0x01",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
@@ -18,6 +21,7 @@
},
{
"BriefDescription": "FAF RF full",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_FAF_FULL",
"PerPkg": "1",
@@ -25,6 +29,7 @@
},
{
"BriefDescription": "FAF - request insert from TC.",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_FAF_INSERTS",
"PerPkg": "1",
@@ -32,6 +37,7 @@
},
{
"BriefDescription": "FAF occupancy",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_FAF_OCCUPANCY",
"PerPkg": "1",
@@ -39,6 +45,7 @@
},
{
"BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_FAF_TRANSACTIONS",
"PerPkg": "1",
@@ -46,14 +53,17 @@
},
{
"BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.EVICTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
"PerPkg": "1",
@@ -62,78 +72,97 @@
},
{
"BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.FAST_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.FAST_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.FAST_XFER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.LOST_FWD",
"PerPkg": "1",
@@ -143,8 +172,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x20",
@@ -152,8 +183,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Received Valid : Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x40",
@@ -161,8 +194,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of E Line : Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x4",
@@ -170,8 +205,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of I Line : Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x1",
@@ -179,8 +216,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of M Line : Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x8",
@@ -188,8 +227,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of S Line : Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x2",
@@ -197,8 +238,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
"UMask": "0x7e",
@@ -206,8 +249,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
"UMask": "0x74",
@@ -215,8 +260,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
"UMask": "0x72",
@@ -224,6 +271,7 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
"PerPkg": "1",
@@ -233,8 +281,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
"UMask": "0x71",
@@ -242,62 +292,77 @@
},
{
"BriefDescription": "Snoop Responses : Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Hit I",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Hit M",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Miss",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpCode",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpData",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpInv",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -307,132 +372,167 @@
},
{
"BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
"EventCode": "0x0b",
"EventName": "UNC_I_TxC_AK_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x08",
"EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x06",
"EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x09",
"EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x07",
"EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x0a",
"EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Counter": "0,1",
"EventCode": "0x1c",
"EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0AD1 both. Stalls on both AD0 and AD1 will count as 2",
"Unit": "IRP"
},
{
"BriefDescription": "No AD0 Egress Credits Stalls",
+ "Counter": "0,1",
"EventCode": "0x1a",
"EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No AD0 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No AD1 Egress Credits Stalls",
+ "Counter": "0,1",
"EventCode": "0x1b",
"EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No AD1 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x1d",
"EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No BL Egress Credit Stalls : Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0x0d",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0x0e",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0x0c",
"EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Request Queue Occupancy : Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
"Unit": "IRP"
},
{
"BriefDescription": "M2M Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2M_CLOCKTICKS",
"PerPkg": "1",
@@ -441,6 +541,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2M_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -448,16 +549,20 @@
},
{
"BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress : Counts the number of time non cisgress D2C was not honoured by egress due to directory state constraints",
"UMask": "0x2",
@@ -465,39 +570,49 @@
},
{
"BriefDescription": "Counts the time when FM didn't do d2c for fill reads (cross tile case)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to core transaction was overridden : Cisgress",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to core transaction was overridden : 2LM Hit?",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE.PMM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_DIRECT2UPITXN_OVERRIDE.PMM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a direct to UPI transaction was overridden. : Counts the number of times D2K wasn't honored even though the incoming request had d2k set",
"UMask": "0x1",
@@ -505,24 +620,30 @@
},
{
"BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored : Counts cisgress d2K that was not honored due to directory constraints",
"UMask": "0x4",
@@ -530,8 +651,10 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U : Counts the number of time D2K was not honoured by egress due to directory state constraints",
"UMask": "0x1",
@@ -539,8 +662,10 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored : Counts non cisgress d2K that was not honored due to directory constraints",
"UMask": "0x2",
@@ -548,8 +673,10 @@
},
{
"BriefDescription": "Messages sent direct to the Intel UPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times egress did D2K (Direct to KTI)",
"UMask": "0x7",
@@ -557,86 +684,107 @@
},
{
"BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -646,6 +794,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -655,6 +804,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -664,6 +814,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -673,86 +824,107 @@
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x320",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x340",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -761,8 +933,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_I_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to I to non persistent memory (DRAM or HBM)",
"UMask": "0x120",
@@ -770,8 +944,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_I_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to I to non persistent memory (DRAM or HBM)",
"UMask": "0x220",
@@ -779,8 +955,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_S_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to S to non persistent memory (DRAM or HBM)",
"UMask": "0x140",
@@ -788,8 +966,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_S_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to S to non persistent memory (DRAM or HBM)",
"UMask": "0x240",
@@ -797,8 +977,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory (DRAM or HBM)",
"UMask": "0x101",
@@ -806,24 +988,30 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x304",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x302",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_A_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to A to non persistent memory (DRAM or HBM)",
"UMask": "0x104",
@@ -831,8 +1019,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_A_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to A to non persistent memory (DRAM or HBM)",
"UMask": "0x204",
@@ -840,8 +1030,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_S_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to S to non persistent memory (DRAM or HBM)",
"UMask": "0x102",
@@ -849,8 +1041,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_S_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to S to non persistent memory (DRAM or HBM)",
"UMask": "0x202",
@@ -858,8 +1052,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts any 2lm miss data return that would result in directory update to non persistent memory (DRAM or HBM)",
"UMask": "0x201",
@@ -867,24 +1063,30 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_A_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to A to non persistent memory (DRAM or HBM)",
"UMask": "0x110",
@@ -892,8 +1094,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_A_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to A to non persistent memory (DRAM or HBM)",
"UMask": "0x210",
@@ -901,8 +1105,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_I_HIT_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to I to non persistent memory (DRAM or HBM)",
"UMask": "0x108",
@@ -910,8 +1116,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_I_MISS_NON_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to I to non persistent memory (DRAM or HBM)",
"UMask": "0x208",
@@ -919,8 +1127,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x80000004",
@@ -928,8 +1138,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x80000001",
@@ -937,40 +1149,50 @@
},
{
"BriefDescription": "Count when Starve Glocab counter is at 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_IGR_STARVE_WINNER.MASK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x304",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0.TO_NM1LM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0.TO_NM1LM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0.TO_NMCache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0.TO_NMCache",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x110",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -979,24 +1201,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x140",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x102",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1005,24 +1233,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x110",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1031,24 +1265,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1.TO_NM1LM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1.TO_NM1LM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x208",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1.TO_NMCache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1.TO_NMCache",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x210",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1057,24 +1297,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x240",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x202",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1083,24 +1329,30 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x210",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x208",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1109,62 +1361,77 @@
},
{
"BriefDescription": "UNC_M2M_IMC_READS.FROM_TGR",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x340",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x302",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x301",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_NM1LM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_NM1LM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_NMCACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_NMCACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_READS.TO_PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_IMC_READS.TO_PMM",
"PerPkg": "1",
@@ -1173,23 +1440,29 @@
},
{
"BriefDescription": "All Writes - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1810",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0.NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1198,15 +1471,19 @@
},
{
"BriefDescription": "From TGR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1215,30 +1492,38 @@
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x804",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive Miss - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1247,32 +1532,40 @@
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x808",
"Unit": "M2M"
},
{
"BriefDescription": "DDR, acting as Cache - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x840",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x820",
"Unit": "M2M"
},
{
"BriefDescription": "PMM - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH0_TO_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1282,15 +1575,19 @@
},
{
"BriefDescription": "Non-Inclusive - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1.NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "All Writes - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1299,15 +1596,19 @@
},
{
"BriefDescription": "From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1316,30 +1617,38 @@
},
{
"BriefDescription": "ISOCH Full Line - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1004",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive Miss - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1348,32 +1657,40 @@
},
{
"BriefDescription": "ISOCH Partial - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1008",
"Unit": "M2M"
},
{
"BriefDescription": "DDR, acting as Cache - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1040",
"Unit": "M2M"
},
{
"BriefDescription": "DDR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1020",
"Unit": "M2M"
},
{
"BriefDescription": "PMM - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.CH1_TO_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1383,75 +1700,94 @@
},
{
"BriefDescription": "From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Full Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1801",
"Unit": "M2M"
},
{
"BriefDescription": "ISOCH Full Line - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1804",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1802",
"Unit": "M2M"
},
{
"BriefDescription": "ISOCH Partial - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1808",
"Unit": "M2M"
},
{
"BriefDescription": "DDR, acting as Cache - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1840",
"Unit": "M2M"
},
{
"BriefDescription": "DDR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1820",
"Unit": "M2M"
},
{
"BriefDescription": "PMM - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
"PerPkg": "1",
@@ -1460,143 +1796,179 @@
},
{
"BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "M2M"
},
{
"BriefDescription": ": UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2M"
},
{
"BriefDescription": ": XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.RD_MERGED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.WR_MERGED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.WR_SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Prefetch CAM Inserts : XPT -All Channels",
"UMask": "0x5",
@@ -1604,108 +1976,135 @@
},
{
"BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Clean NearMem Read Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
"PerPkg": "1",
@@ -1715,6 +2114,7 @@
},
{
"BriefDescription": "Dirty NearMem Read Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
"PerPkg": "1",
@@ -1724,8 +2124,10 @@
},
{
"BriefDescription": "Tag Hit : Clean NearMem Underfill Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts clean underfill hits due to a partial write",
"UMask": "0x4",
@@ -1733,8 +2135,10 @@
},
{
"BriefDescription": "Tag Hit : Dirty NearMem Underfill Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts dirty underfill read hits due to a partial write",
"UMask": "0x8",
@@ -1742,230 +2146,288 @@
},
{
"BriefDescription": "UNC_M2M_TAG_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2M_TAG_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x104",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x204",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "WPQ Flush : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "WPQ Flush : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "CBox AD Credits Empty : Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Requests : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x4",
@@ -1973,8 +2435,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Snoops : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x8",
@@ -1982,8 +2446,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : VNA Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : VNA Messages : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x1",
@@ -1991,8 +2457,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty : Writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CBox AD Credits Empty : Writebacks : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x2",
@@ -2000,6 +2468,7 @@
},
{
"BriefDescription": "M3UPI Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M3UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -2008,31 +2477,39 @@
},
{
"BriefDescription": "M3UPI CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2C Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_M3UPI_D2C_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "D2C Sent : Count cases BL sends direct to core",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2U Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_M3UPI_D2U_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "D2U Sent : Cases where SMI3 sends D2U command",
"Unit": "M3UPI"
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -2040,8 +2517,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -2049,8 +2528,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only) : No vn0 and vna credits available to send to M2",
"UMask": "0x1",
@@ -2058,8 +2539,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO2",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO2 : No vn0 and vna credits available to send to M2",
"UMask": "0x2",
@@ -2067,8 +2550,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO3",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO3 : No vn0 and vna credits available to send to M2",
"UMask": "0x4",
@@ -2076,8 +2561,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO4",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO4 : No vn0 and vna credits available to send to M2",
"UMask": "0x8",
@@ -2085,8 +2572,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO5",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
"UMask": "0x10",
@@ -2094,8 +2583,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together : No vn0 and vna credits available to send to M2",
"UMask": "0x40",
@@ -2103,8 +2594,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits : No vn0 and vna credits available to send to M2",
"UMask": "0x80",
@@ -2112,8 +2605,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty : IIO5",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.UBOX_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
"UMask": "0x20",
@@ -2121,8 +2616,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x1",
@@ -2130,8 +2627,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 1 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x2",
@@ -2139,8 +2638,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AD - Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AD - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x4",
@@ -2148,8 +2649,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AK - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AK - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x10",
@@ -2157,8 +2660,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : AK - Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : AK - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x20",
@@ -2166,8 +2671,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received : BL - Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3e",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi Slot Flit Received : BL - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x8",
@@ -2175,8 +2682,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : REQ on AD : VN0 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2184,8 +2693,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : RSP on AD : VN0 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2193,8 +2704,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : SNP on AD : VN0 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2202,8 +2715,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : NCB on BL : VN0 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2211,8 +2726,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : NCS on BL : VN0 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2220,8 +2737,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : RSP on BL : VN0 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2229,8 +2748,10 @@
},
{
"BriefDescription": "Lost Arb for VN0 : WB on BL",
+ "Counter": "0",
"EventCode": "0x4b",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN0 : WB on BL : VN0 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2238,8 +2759,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : REQ on AD : VN1 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2247,8 +2770,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : RSP on AD : VN1 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2256,8 +2781,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : SNP on AD : VN1 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2265,8 +2792,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : NCB on BL : VN1 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2274,8 +2803,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : NCS on BL : VN1 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2283,8 +2814,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : RSP on BL : VN1 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2292,8 +2825,10 @@
},
{
"BriefDescription": "Lost Arb for VN1 : WB on BL",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lost Arb for VN1 : WB on BL : VN1 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2301,8 +2836,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0 : AD and BL messages won arbitration concurrently / in parallel",
"UMask": "0x10",
@@ -2310,8 +2847,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1 : AD and BL messages won arbitration concurrently / in parallel",
"UMask": "0x20",
@@ -2319,8 +2858,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : Max Parallel Win",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ALL_PARALLEL_WIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : Max Parallel Win : VN0 and VN1 arbitration sub-pipelines both produced AD and BL winners (maximum possible parallel winners)",
"UMask": "0x80",
@@ -2328,8 +2869,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN0",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN0 : Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
"UMask": "0x1",
@@ -2337,8 +2880,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN1",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN1 : Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
"UMask": "0x2",
@@ -2346,8 +2891,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN0",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN0 : Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
"UMask": "0x4",
@@ -2355,8 +2902,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN1",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN1 : Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
"UMask": "0x8",
@@ -2364,8 +2913,10 @@
},
{
"BriefDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.VN01_PARALLEL_WIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win : VN0 and VN1 arbitration sub-pipelines had parallel winners (at least one AD or BL on each side)",
"UMask": "0x40",
@@ -2373,8 +2924,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : REQ on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2382,8 +2935,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : RSP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2391,8 +2946,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : SNP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2400,8 +2957,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : NCB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2409,8 +2968,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : NCS on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2418,8 +2979,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : RSP on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2427,8 +2990,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0 : WB on BL",
+ "Counter": "0",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN0 : WB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2436,8 +3001,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : REQ on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2445,8 +3012,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : RSP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2454,8 +3023,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : SNP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2463,8 +3034,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : NCB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2472,8 +3045,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : NCS on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2481,8 +3056,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : RSP on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2490,8 +3067,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1 : WB on BL",
+ "Counter": "0",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No Credits to Arb for VN1 : WB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2499,8 +3078,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : REQ on AD : VN0 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2508,8 +3089,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : RSP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2517,8 +3100,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : SNP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2526,8 +3111,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : NCB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2535,8 +3122,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : NCS on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2544,8 +3133,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : RSP on BL : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2553,8 +3144,10 @@
},
{
"BriefDescription": "Can't Arb for VN0 : WB on BL",
+ "Counter": "0",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN0 : WB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2562,8 +3155,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : REQ on AD",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : REQ on AD : VN1 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2571,8 +3166,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : RSP on AD",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : RSP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2580,8 +3177,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : SNP on AD",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : SNP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2589,8 +3188,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : NCB on BL",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : NCB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2598,8 +3199,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : NCS on BL",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : NCS on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2607,8 +3210,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : RSP on BL",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : RSP on BL : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2616,8 +3221,10 @@
},
{
"BriefDescription": "Can't Arb for VN1 : WB on BL",
+ "Counter": "0",
"EventCode": "0x4a",
"EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Can't Arb for VN1 : WB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2625,8 +3232,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
"UMask": "0x2",
@@ -2634,8 +3243,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while pipeline is idle",
"UMask": "0x1",
@@ -2643,8 +3254,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 1",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 1 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 1 while merging with bl message in same flit",
"UMask": "0x4",
@@ -2652,8 +3265,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 2 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 2 while merging with bl message in same flit",
"UMask": "0x8",
@@ -2661,8 +3276,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : Any In BGF FIFO",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : Any In BGF FIFO : Indication that at least one packet (flit) is in the bgf (fifo only)",
"UMask": "0x1",
@@ -2670,8 +3287,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : Any in BGF Path",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : Any in BGF Path : Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
"UMask": "0x2",
@@ -2679,8 +3298,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.LT1_FOR_D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 1",
"UMask": "0x10",
@@ -2688,8 +3309,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.LT2_FOR_D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 2",
"UMask": "0x20",
@@ -2697,8 +3320,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events : No D2K For Arb",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.VN0_NO_D2K_FOR_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : No D2K For Arb : VN0 BL RSP message was blocked from arbitration request due to lack of D2K CMP credit",
"UMask": "0x4",
@@ -2706,8 +3331,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0",
"EventCode": "0x5f",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.VN1_NO_D2K_FOR_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous Credit Events : VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
"UMask": "0x8",
@@ -2715,8 +3342,10 @@
},
{
"BriefDescription": "Credit Occupancy : Credits Consumed",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.CONSUMED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Credits Consumed : number of remote vna credits consumed per cycle",
"UMask": "0x80",
@@ -2724,8 +3353,10 @@
},
{
"BriefDescription": "Credit Occupancy : D2K Credits",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : D2K Credits : D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x10",
@@ -2733,8 +3364,10 @@
},
{
"BriefDescription": "Credit Occupancy : Packets in BGF FIFO",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Packets in BGF FIFO : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
"UMask": "0x2",
@@ -2742,8 +3375,10 @@
},
{
"BriefDescription": "Credit Occupancy : Packets in BGF Path",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Packets in BGF Path : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
"UMask": "0x4",
@@ -2751,8 +3386,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in completion fifo only",
"UMask": "0x40",
@@ -2760,8 +3397,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in marker table and in fifo",
"UMask": "0x20",
@@ -2769,8 +3408,10 @@
},
{
"BriefDescription": "Credit Occupancy : Transmit Credits",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : Transmit Credits : Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x8",
@@ -2778,8 +3419,10 @@
},
{
"BriefDescription": "Credit Occupancy : VNA In Use",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Credit Occupancy : VNA In Use : Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
"UMask": "0x1",
@@ -2787,8 +3430,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -2796,8 +3441,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -2805,8 +3452,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -2814,8 +3463,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -2823,8 +3474,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -2832,8 +3485,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -2841,8 +3496,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "Counter": "0",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -2850,8 +3507,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : All",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : All : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but could not be sent for any reason, e.g. low credits, low tsv, stall injection",
"UMask": "0x1",
@@ -2859,8 +3518,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : No BGF Credits",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_BGF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : No BGF Credits : Data flit is ready for transmission but could not be sent",
"UMask": "0x8",
@@ -2868,8 +3529,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : No TxQ Credits",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : No TxQ Credits : Data flit is ready for transmission but could not be sent",
"UMask": "0x10",
@@ -2877,8 +3540,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : TSV High",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.TSV_HI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : TSV High : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while tsv high",
"UMask": "0x2",
@@ -2886,8 +3551,10 @@
},
{
"BriefDescription": "Data Flit Not Sent : Cycle valid for Flit",
+ "Counter": "0",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.VALID_FOR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data Flit Not Sent : Cycle valid for Flit : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while cycle is valid for flit transmission",
"UMask": "0x4",
@@ -2895,8 +3562,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 0",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 0 : generating bl data flit sequence; waiting for data pump 0",
"UMask": "0x1",
@@ -2904,8 +3573,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
"UMask": "0x10",
@@ -2913,8 +3584,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is tracking at least one message",
"UMask": "0x8",
@@ -2922,8 +3595,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending completion fifo is full",
"UMask": "0x40",
@@ -2931,8 +3606,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
"UMask": "0x20",
@@ -2940,8 +3617,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : a bl message finished but is in limbo and moved to pump-1-pending logic",
"UMask": "0x4",
@@ -2949,8 +3628,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 1",
+ "Counter": "0",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 1 : generating bl data flit sequence; waiting for data pump 1",
"UMask": "0x2",
@@ -2958,8 +3639,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "Counter": "0",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request naturally serviced during hold-off period",
"UMask": "0x4",
@@ -2967,8 +3650,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "Counter": "0",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request forcibly serviced during service window",
"UMask": "0x8",
@@ -2976,8 +3661,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "Counter": "0",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request received from link layer while idle (with no slot 2 request active immediately prior)",
"UMask": "0x1",
@@ -2985,8 +3672,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "Counter": "0",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": slot 2 request withdrawn during hold-off period or service window",
"UMask": "0x2",
@@ -2994,16 +3683,20 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : All",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Needs Data Flit",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Needs Data Flit : BL message requires data flit sequence",
"UMask": "0x2",
@@ -3011,8 +3704,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0 : Waiting for header pump 0",
"UMask": "0x4",
@@ -3020,8 +3715,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 : Header pump 1 is not required for flit",
"UMask": "0x10",
@@ -3029,8 +3726,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble : Header pump 1 is not required for flit but flit transmission delayed",
"UMask": "0x20",
@@ -3038,8 +3737,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail : Header pump 1 is not required for flit and not available",
"UMask": "0x40",
@@ -3047,8 +3748,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1",
+ "Counter": "0",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1 : Waiting for header pump 1",
"UMask": "0x8",
@@ -3056,8 +3759,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate : Events related to Header Flit Generation - Set 1 : Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
"UMask": "0x1",
@@ -3065,8 +3770,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate Ready",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate Ready : Events related to Header Flit Generation - Set 1 : header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
"UMask": "0x2",
@@ -3074,8 +3781,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Accumulate Wasted",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Accumulate Wasted : Events related to Header Flit Generation - Set 1 : Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
"UMask": "0x4",
@@ -3083,8 +3792,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked : Events related to Header Flit Generation - Set 1 : Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
"UMask": "0x8",
@@ -3092,8 +3803,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_AFTER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: message was slotted only after run-ahead was over; run-ahead mode definitely wasted",
"UMask": "0x80",
@@ -3101,8 +3814,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Message",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_DURING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Message : Events related to Header Flit Generation - Set 1 : run-ahead mode: one message slotted during run-ahead",
"UMask": "0x10",
@@ -3110,8 +3825,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_AFTER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: second message slotted immediately after run-ahead; potential run-ahead success",
"UMask": "0x20",
@@ -3119,8 +3836,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: two (or three) message flit sent immediately after run-ahead; complete run-ahead success",
"UMask": "0x40",
@@ -3128,8 +3847,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Ok",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Ok : Events related to Header Flit Generation - Set 2 : new header flit construction may proceed in parallel with data flit sequence",
"UMask": "0x4",
@@ -3137,8 +3858,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Flit Finished",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Flit Finished : Events related to Header Flit Generation - Set 2 : header flit finished assembly in parallel with data flit sequence",
"UMask": "0x10",
@@ -3146,8 +3869,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Parallel Message",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Parallel Message : Events related to Header Flit Generation - Set 2 : message is slotted into header flit in parallel with data flit sequence",
"UMask": "0x8",
@@ -3155,8 +3880,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall : Events related to Header Flit Generation - Set 2 : Rate-matching stall injected",
"UMask": "0x1",
@@ -3164,8 +3891,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message",
+ "Counter": "0",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message : Events related to Header Flit Generation - Set 2 : Rate matching stall injected, but no additional message slotted during stall cycle",
"UMask": "0x2",
@@ -3173,8 +3902,10 @@
},
{
"BriefDescription": "Sent Header Flit : One Message",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : One Message : One message in flit; VNA or non-VNA flit",
"UMask": "0x1",
@@ -3182,8 +3913,10 @@
},
{
"BriefDescription": "Sent Header Flit : One Message in non-VNA",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG_VNX",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : One Message in non-VNA : One message in flit; non-VNA flit",
"UMask": "0x8",
@@ -3191,8 +3924,10 @@
},
{
"BriefDescription": "Sent Header Flit : Two Messages",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.2_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : Two Messages : Two messages in flit; VNA flit",
"UMask": "0x2",
@@ -3200,8 +3935,10 @@
},
{
"BriefDescription": "Sent Header Flit : Three Messages",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.3_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Sent Header Flit : Three Messages : Three messages in flit; VNA flit",
"UMask": "0x4",
@@ -3209,32 +3946,40 @@
},
{
"BriefDescription": "Sent Header Flit : One Slot Taken",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit : Two Slots Taken",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit : All Slots Taken",
+ "Counter": "0",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "Header Not Sent : All",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : All : header flit is ready for transmission but could not be sent : header flit is ready for transmission but could not be sent for any reason, e.g. no credits, low tsv, stall injection",
"UMask": "0x1",
@@ -3242,8 +3987,10 @@
},
{
"BriefDescription": "Header Not Sent : No BGF Credits",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No BGF Credits : header flit is ready for transmission but could not be sent : No BGF credits available",
"UMask": "0x8",
@@ -3251,8 +3998,10 @@
},
{
"BriefDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No BGF credits available; no additional message slotted into flit",
"UMask": "0x20",
@@ -3260,8 +4009,10 @@
},
{
"BriefDescription": "Header Not Sent : No TxQ Credits",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No TxQ Credits : header flit is ready for transmission but could not be sent : No TxQ credits available",
"UMask": "0x10",
@@ -3269,8 +4020,10 @@
},
{
"BriefDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No TxQ credits available; no additional message slotted into flit",
"UMask": "0x40",
@@ -3278,8 +4031,10 @@
},
{
"BriefDescription": "Header Not Sent : TSV High",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.TSV_HI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : TSV High : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while tsv high",
"UMask": "0x2",
@@ -3287,8 +4042,10 @@
},
{
"BriefDescription": "Header Not Sent : Cycle valid for Flit",
+ "Counter": "0",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.VALID_FOR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header Not Sent : Cycle valid for Flit : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while cycle is valid for flit transmission",
"UMask": "0x4",
@@ -3296,8 +4053,10 @@
},
{
"BriefDescription": "Message Held : Can't Slot AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Can't Slot AD : some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x10",
@@ -3305,8 +4064,10 @@
},
{
"BriefDescription": "Message Held : Can't Slot BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Can't Slot BL : some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x20",
@@ -3314,8 +4075,10 @@
},
{
"BriefDescription": "Message Held : Parallel Attempt",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Parallel Attempt : ad and bl messages attempted to slot into the same flit in parallel",
"UMask": "0x4",
@@ -3323,8 +4086,10 @@
},
{
"BriefDescription": "Message Held : Parallel Success",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
@@ -3332,8 +4097,10 @@
},
{
"BriefDescription": "Message Held : VN0",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : VN0 : vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
"UMask": "0x1",
@@ -3341,8 +4108,10 @@
},
{
"BriefDescription": "Message Held : VN1",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Held : VN1 : vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
"UMask": "0x2",
@@ -3350,8 +4119,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -3359,8 +4130,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -3368,8 +4141,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -3377,8 +4152,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -3386,8 +4163,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -3395,8 +4174,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -3404,8 +4185,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit : WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4e",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -3413,8 +4196,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -3422,8 +4207,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -3431,8 +4218,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -3440,8 +4229,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -3449,8 +4240,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -3458,8 +4251,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -3467,8 +4262,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit : WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4f",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -3476,8 +4273,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Any In Use",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Any In Use : At least one remote vna credit is in use",
"UMask": "0x20",
@@ -3485,8 +4284,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Corrected",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Corrected : Number of remote vna credits corrected (local return) per cycle",
"UMask": "0x1",
@@ -3494,8 +4295,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 1",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 1 : Remote vna credit level is less than 1 (i.e. no vna credits available)",
"UMask": "0x2",
@@ -3503,8 +4306,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 10",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 10 : remote vna credit level is less than 10; parallel vn0/vn1 arb not possible",
"UMask": "0x10",
@@ -3512,8 +4317,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 4",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 4 : Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
"UMask": "0x4",
@@ -3521,8 +4328,10 @@
},
{
"BriefDescription": "Remote VNA Credits : Level < 5",
+ "Counter": "0",
"EventCode": "0x5a",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote VNA Credits : Level < 5 : Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
"UMask": "0x8",
@@ -3530,8 +4339,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credit count was less than 5 and allocation to ad or bl messages was required",
"UMask": "0x2",
@@ -3539,8 +4350,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credit count was less than 10 and allocation to vn0 or vn1 was required",
"UMask": "0x1",
@@ -3548,8 +4361,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn0, remote vna credits were allocated only to ad messages, not to bl",
"UMask": "0x10",
@@ -3557,8 +4372,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn0, remote vna credits were allocated only to bl messages, not to ad",
"UMask": "0x20",
@@ -3566,8 +4383,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credits were allocated only to vn0, not to vn1",
"UMask": "0x4",
@@ -3575,8 +4394,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn1, remote vna credits were allocated only to ad messages, not to bl",
"UMask": "0x40",
@@ -3584,8 +4405,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": on vn1, remote vna credits were allocated only to bl messages, not to ad",
"UMask": "0x80",
@@ -3593,8 +4416,10 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "Counter": "0",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": remote vna credits were allocated only to vn1, not to vn0",
"UMask": "0x8",
@@ -3602,8 +4427,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 REQ Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -3611,8 +4438,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 RSP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -3620,8 +4449,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 SNP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -3629,8 +4460,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN0 WB Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -3638,8 +4471,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 REQ Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -3647,8 +4482,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 RSP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -3656,8 +4493,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 SNP Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -3665,8 +4504,10 @@
},
{
"BriefDescription": "Failed ARB for AD : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for AD : VN1 WB Messages : AD arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -3674,8 +4515,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3684,8 +4527,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x1",
@@ -3693,8 +4538,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x2",
@@ -3702,8 +4549,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x4",
@@ -3711,8 +4560,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x8",
@@ -3720,8 +4571,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x1",
@@ -3729,8 +4582,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x4",
@@ -3738,8 +4593,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x2",
@@ -3747,8 +4604,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN0 WB Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x8",
@@ -3756,8 +4615,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x10",
@@ -3765,8 +4626,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x40",
@@ -3774,8 +4637,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x20",
@@ -3783,8 +4648,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Not Empty : VN1 WB Messages : Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x80",
@@ -3792,8 +4659,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -3801,8 +4670,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -3810,8 +4681,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -3819,8 +4692,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -3828,8 +4703,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -3837,8 +4714,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -3846,8 +4725,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Flow Q Inserts : VN1 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -3855,78 +4736,98 @@
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy : VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1c",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "AK Flow Q Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AK Flow Q Occupancy",
+ "Counter": "0",
"EventCode": "0x1e",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Failed ARB for BL : VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 NCB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -3934,8 +4835,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 NCS Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 NCS Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -3943,8 +4846,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 RSP Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -3952,8 +4857,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN0 WB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -3961,8 +4868,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 NCS Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 NCS Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -3970,8 +4879,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 NCB Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 NCB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -3979,8 +4890,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 RSP Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -3988,8 +4901,10 @@
},
{
"BriefDescription": "Failed ARB for BL : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Failed ARB for BL : VN1 WB Messages : BL arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -3997,8 +4912,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x1",
@@ -4006,8 +4923,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x4",
@@ -4015,8 +4934,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x2",
@@ -4024,8 +4945,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN0 WB Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x8",
@@ -4033,8 +4956,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x10",
@@ -4042,8 +4967,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x40",
@@ -4051,8 +4978,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x20",
@@ -4060,8 +4989,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Not Empty : VN1 WB Messages : Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x80",
@@ -4069,8 +5000,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -4078,8 +5011,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -4087,8 +5022,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 NCS Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -4096,8 +5033,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN0 NCB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN0 NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -4105,8 +5044,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -4114,8 +5055,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1 WB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -4123,8 +5066,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1_NCB Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1_NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x80",
@@ -4132,8 +5077,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts : VN1_NCS Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Flow Q Inserts : VN1_NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -4141,120 +5088,150 @@
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCS Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCB Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x1d",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_THROUGH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_WRPULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_THROUGH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "Counter": "0",
"EventCode": "0x1f",
"EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_WRPULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 REQ Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x2",
@@ -4262,8 +5239,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 RSP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x8",
@@ -4271,8 +5250,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN0 SNP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x4",
@@ -4280,8 +5261,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 REQ Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x10",
@@ -4289,8 +5272,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 RSP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x40",
@@ -4298,8 +5283,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VN1 SNP Messages : No credits available to send to UPIs on the AD Ring",
"UMask": "0x20",
@@ -4307,8 +5294,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty : VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 AD Credits Empty : VNA : No credits available to send to UPIs on the AD Ring",
"UMask": "0x1",
@@ -4316,8 +5305,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x4",
@@ -4325,8 +5316,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x2",
@@ -4334,8 +5327,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN0 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x8",
@@ -4343,8 +5338,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x20",
@@ -4352,8 +5349,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x10",
@@ -4361,8 +5360,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VN1 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x40",
@@ -4370,8 +5371,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty : VNA",
+ "Counter": "0",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "UPI0 BL Credits Empty : VNA : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x1",
@@ -4379,16 +5382,20 @@
},
{
"BriefDescription": "FlowQ Generated Prefetch",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "FlowQ Generated Prefetch : Count cases where FlowQ causes spawn of Prefetch to iMC/SMI3 target",
"Unit": "M3UPI"
},
{
"BriefDescription": "VN0 Credit Used : WB on BL",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : WB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -4396,8 +5403,10 @@
},
{
"BriefDescription": "VN0 Credit Used : NCB on BL",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : NCB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -4405,8 +5414,10 @@
},
{
"BriefDescription": "VN0 Credit Used : REQ on AD",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : REQ on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -4414,8 +5425,10 @@
},
{
"BriefDescription": "VN0 Credit Used : RSP on AD",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : RSP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -4423,8 +5436,10 @@
},
{
"BriefDescription": "VN0 Credit Used : SNP on AD",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : SNP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -4432,8 +5447,10 @@
},
{
"BriefDescription": "VN0 Credit Used : RSP on BL",
+ "Counter": "0",
"EventCode": "0x5b",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Used : RSP on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -4441,8 +5458,10 @@
},
{
"BriefDescription": "VN0 No Credits : WB on BL",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : WB on BL : Number of Cycles there were no VN0 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -4450,8 +5469,10 @@
},
{
"BriefDescription": "VN0 No Credits : NCB on BL",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : NCB on BL : Number of Cycles there were no VN0 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -4459,8 +5480,10 @@
},
{
"BriefDescription": "VN0 No Credits : REQ on AD",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : REQ on AD : Number of Cycles there were no VN0 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -4468,8 +5491,10 @@
},
{
"BriefDescription": "VN0 No Credits : RSP on AD",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : RSP on AD : Number of Cycles there were no VN0 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -4477,8 +5502,10 @@
},
{
"BriefDescription": "VN0 No Credits : SNP on AD",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : SNP on AD : Number of Cycles there were no VN0 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -4486,8 +5513,10 @@
},
{
"BriefDescription": "VN0 No Credits : RSP on BL",
+ "Counter": "0",
"EventCode": "0x5d",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 No Credits : RSP on BL : Number of Cycles there were no VN0 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -4495,8 +5524,10 @@
},
{
"BriefDescription": "VN1 Credit Used : WB on BL",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : WB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -4504,8 +5535,10 @@
},
{
"BriefDescription": "VN1 Credit Used : NCB on BL",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : NCB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -4513,8 +5546,10 @@
},
{
"BriefDescription": "VN1 Credit Used : REQ on AD",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : REQ on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -4522,8 +5557,10 @@
},
{
"BriefDescription": "VN1 Credit Used : RSP on AD",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : RSP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -4531,8 +5568,10 @@
},
{
"BriefDescription": "VN1 Credit Used : SNP on AD",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : SNP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -4540,8 +5579,10 @@
},
{
"BriefDescription": "VN1 Credit Used : RSP on BL",
+ "Counter": "0",
"EventCode": "0x5c",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Used : RSP on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -4549,8 +5590,10 @@
},
{
"BriefDescription": "VN1 No Credits : WB on BL",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : WB on BL : Number of Cycles there were no VN1 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -4558,8 +5601,10 @@
},
{
"BriefDescription": "VN1 No Credits : NCB on BL",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : NCB on BL : Number of Cycles there were no VN1 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -4567,8 +5612,10 @@
},
{
"BriefDescription": "VN1 No Credits : REQ on AD",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : REQ on AD : Number of Cycles there were no VN1 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -4576,8 +5623,10 @@
},
{
"BriefDescription": "VN1 No Credits : RSP on AD",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : RSP on AD : Number of Cycles there were no VN1 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -4585,8 +5634,10 @@
},
{
"BriefDescription": "VN1 No Credits : SNP on AD",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : SNP on AD : Number of Cycles there were no VN1 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -4594,8 +5645,10 @@
},
{
"BriefDescription": "VN1 No Credits : RSP on BL",
+ "Counter": "0",
"EventCode": "0x5e",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 No Credits : RSP on BL : Number of Cycles there were no VN1 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -4603,168 +5656,210 @@
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x82",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x81",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x84",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc0",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7e",
"EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "Counter": "0",
"EventCode": "0x7d",
"EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M3UPI"
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message is making arbitration request",
"UMask": "0x4",
@@ -4772,8 +5867,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message arrived in ingress pipeline",
"UMask": "0x1",
@@ -4781,8 +5878,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message took bypass path",
"UMask": "0x2",
@@ -4790,8 +5889,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was slotted into flit (non bypass)",
"UMask": "0x10",
@@ -4799,8 +5900,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message lost arbitration",
"UMask": "0x8",
@@ -4808,8 +5911,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was dropped because it became too old",
"UMask": "0x20",
@@ -4817,8 +5922,10 @@
},
{
"BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "Counter": "0",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": xpt prefetch message was dropped because it was overwritten by new message while prefetch queue was full",
"UMask": "0x40",
@@ -4826,8 +5933,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bounceable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD Bounceable : Number of allocations into the CRS Egress",
"UMask": "0x1",
@@ -4835,8 +5944,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD credited)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD credited : Number of allocations into the CRS Egress",
"UMask": "0x2",
@@ -4844,8 +5955,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AK)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AK : Number of allocations into the CRS Egress",
"UMask": "0x10",
@@ -4853,8 +5966,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AKC)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AKC : Number of allocations into the CRS Egress",
"UMask": "0x40",
@@ -4862,8 +5977,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bounceable)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL Bounceable : Number of allocations into the CRS Egress",
"UMask": "0x4",
@@ -4871,8 +5988,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL credited)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL credited : Number of allocations into the CRS Egress",
"UMask": "0x8",
@@ -4880,8 +5999,10 @@
},
{
"BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (IV)",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IV : Number of allocations into the CRS Egress",
"UMask": "0x20",
@@ -4889,8 +6010,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AD)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x1",
@@ -4898,8 +6021,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AK)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AK : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x4",
@@ -4907,8 +6032,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (AKC)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AKC : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x10",
@@ -4916,8 +6043,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (BL)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x2",
@@ -4925,8 +6054,10 @@
},
{
"BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO Ingress (V-EMIB) (IV)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IV : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
"UMask": "0x8",
@@ -4934,8 +6065,10 @@
},
{
"BriefDescription": "Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_MDF_FAST_ASSERTED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD bnc : Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
"UMask": "0x1",
@@ -4943,8 +6076,10 @@
},
{
"BriefDescription": "Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_MDF_FAST_ASSERTED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL bnc : Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
"UMask": "0x2",
@@ -4952,6 +6087,7 @@
},
{
"BriefDescription": "UPI Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -4960,8 +6096,10 @@
},
{
"BriefDescription": "Direct packet attempts : D2C",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Direct packet attempts : D2C : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
"UMask": "0x1",
@@ -4969,8 +6107,10 @@
},
{
"BriefDescription": "Direct packet attempts : D2K",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Direct packet attempts : D2K : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
"UMask": "0x2",
@@ -4978,70 +6118,87 @@
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_UPI_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -5050,246 +6207,308 @@
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req Nack",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_UPI_POWER_L1_NACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "L1 Req Nack : Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req (same as L1 Ack).",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_UPI_POWER_L1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "L1 Req (same as L1 Ack). : Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.DATA",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.LLCRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.NULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xe",
@@ -5297,8 +6516,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10e",
@@ -5306,8 +6527,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xf",
@@ -5315,8 +6538,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10f",
@@ -5324,8 +6549,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 0 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x1",
@@ -5333,8 +6560,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 1 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x2",
@@ -5342,8 +6571,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Bypassed : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Bypassed : Slot 2 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x4",
@@ -5351,40 +6582,50 @@
},
{
"BriefDescription": "CRC Errors Detected",
+ "Counter": "0,1,2,3",
"EventCode": "0x0b",
"EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CRC Errors Detected : Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
"Unit": "UPI"
},
{
"BriefDescription": "LLR Requests Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "LLR Requests Sent : Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs..",
"Unit": "UPI"
},
{
"BriefDescription": "VN0 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 Credit Consumed : Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VN1 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x3a",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 Credit Consumed : Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -5393,6 +6634,7 @@
},
{
"BriefDescription": "Valid Flits Received : All Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -5402,6 +6644,7 @@
},
{
"BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -5410,8 +6653,10 @@
},
{
"BriefDescription": "Valid Flits Received : Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
"UMask": "0x8",
@@ -5419,8 +6664,10 @@
},
{
"BriefDescription": "Valid Flits Received : Idle",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Idle : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -5428,8 +6675,10 @@
},
{
"BriefDescription": "Valid Flits Received : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -5437,8 +6686,10 @@
},
{
"BriefDescription": "Valid Flits Received : LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -5446,6 +6697,7 @@
},
{
"BriefDescription": "Valid Flits Received : All Non Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -5455,8 +6707,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
"UMask": "0x20",
@@ -5464,8 +6718,10 @@
},
{
"BriefDescription": "Valid Flits Received : Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -5473,8 +6729,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -5482,8 +6740,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -5491,8 +6751,10 @@
},
{
"BriefDescription": "Valid Flits Received : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -5500,8 +6762,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x1",
@@ -5509,8 +6773,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x2",
@@ -5518,8 +6784,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x4",
@@ -5527,8 +6795,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 0 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x1",
@@ -5536,8 +6806,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 1 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x2",
@@ -5545,8 +6817,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RxQ Occupancy - All Packets : Slot 2 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x4",
@@ -5554,214 +6828,268 @@
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.DATA",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.LLCRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.NULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xe",
@@ -5769,8 +7097,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10e",
@@ -5778,8 +7108,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0xf",
@@ -5787,8 +7119,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.",
"UMask": "0x10f",
@@ -5796,14 +7130,17 @@
},
{
"BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_UPI_TxL_BYPASSED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Bypassed : Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the UPI Link. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : All Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -5813,8 +7150,10 @@
},
{
"BriefDescription": "Valid Flits Sent : All LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : All Data : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x17",
@@ -5822,8 +7161,10 @@
},
{
"BriefDescription": "Valid Flits Sent : All LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : All LLCTRL : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -5831,6 +7172,7 @@
},
{
"BriefDescription": "All Null Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -5839,8 +7181,10 @@
},
{
"BriefDescription": "Valid Flits Sent : All Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : All ProtDDR : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x87",
@@ -5848,8 +7192,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
"UMask": "0x8",
@@ -5857,8 +7203,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Idle",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -5866,8 +7214,10 @@
},
{
"BriefDescription": "Valid Flits Sent : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -5875,8 +7225,10 @@
},
{
"BriefDescription": "Valid Flits Sent : LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -5884,6 +7236,7 @@
},
{
"BriefDescription": "Valid Flits Sent : All Non Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -5893,8 +7246,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
"UMask": "0x20",
@@ -5902,8 +7257,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -5911,8 +7268,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -5920,8 +7279,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -5929,8 +7290,10 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -5938,47 +7301,59 @@
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_UPI_TxL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VNA Credits Pending Return - Occupancy : Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
"Unit": "UPI"
},
{
"BriefDescription": "Message Received : Doorbell",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "Message Received : Interrupt",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : Interrupt : Interrupts",
"UMask": "0x10",
@@ -5986,8 +7361,10 @@
},
{
"BriefDescription": "Message Received : IPI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
"UMask": "0x4",
@@ -5995,8 +7372,10 @@
},
{
"BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
"UMask": "0x2",
@@ -6004,8 +7383,10 @@
},
{
"BriefDescription": "Message Received : VLW",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x1",
@@ -6013,152 +7394,190 @@
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "Counter": "0",
"EventCode": "0x4d",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Counter": "0",
"EventCode": "0x4e",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Counter": "0",
"EventCode": "0x4f",
"EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Counter": "0",
"EventCode": "0x4f",
"EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles PHOLD Assert to Ack : Assert to ACK : PHOLD cycles.",
"UMask": "0x1",
@@ -6166,32 +7585,40 @@
},
{
"BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0",
"EventCode": "0x4c",
"EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RACU Request : Number outstanding register requests within message channel tracker",
"Unit": "UBOX"
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
index 03596db87710..91013ced74aa 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
@@ -1,134 +1,167 @@
[
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "1",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "2",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "3",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x22",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x23",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "5",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "6",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x25",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "7",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x26",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "8",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x27",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "9",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_OUT.PART0_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x30",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "10",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_OUT.PART1_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x31",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "11",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_OUT.PART2_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x32",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "12",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_OUT.PART3_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x33",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "13",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_OUT.PART4_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x34",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "14",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_OUT.PART5_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x35",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "15",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_OUT.PART6_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x36",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "16",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_OUT.PART7_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "iio_free_running"
},
{
"BriefDescription": "IIO Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_IIO_CLOCKTICKS",
"PerPkg": "1",
@@ -138,6 +171,7 @@
},
{
"BriefDescription": "Free running counter that increments for IIO clocktick",
+ "Counter": "0",
"EventCode": "0xff",
"EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
"PerPkg": "1",
@@ -146,8 +180,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xff",
@@ -157,8 +193,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -168,8 +206,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -179,8 +219,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -190,8 +232,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -201,8 +245,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -212,8 +258,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -223,8 +271,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -234,8 +284,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -245,8 +297,10 @@
},
{
"BriefDescription": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"UMask": "0xff",
@@ -254,8 +308,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 0",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -265,8 +321,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 1",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -276,8 +334,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 2",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -287,8 +347,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 3",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -298,8 +360,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 4",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -309,8 +373,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 5",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -320,8 +386,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 6",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -331,8 +399,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy : Part 7",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0000",
@@ -342,8 +412,10 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0-7",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00ff",
@@ -352,6 +424,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -363,6 +436,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -374,6 +448,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -385,6 +460,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -396,6 +472,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -407,6 +484,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -418,6 +496,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -429,6 +508,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -440,8 +520,10 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part0-7 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00ff",
@@ -450,8 +532,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0100",
@@ -461,8 +545,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0200",
@@ -472,6 +558,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -483,6 +570,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -494,6 +582,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -505,6 +594,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -516,6 +606,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -527,6 +618,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -538,6 +630,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -549,6 +642,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -560,8 +654,10 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -571,8 +667,10 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -582,8 +680,10 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -593,8 +693,10 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -604,8 +706,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -615,8 +719,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -626,8 +732,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -637,8 +745,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -648,8 +758,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -659,8 +771,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -670,8 +784,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -681,8 +797,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -692,8 +810,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -703,8 +823,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -714,8 +836,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -725,8 +849,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -736,8 +862,10 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xff",
@@ -747,6 +875,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
"FCMask": "0x07",
@@ -758,6 +887,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
"FCMask": "0x07",
@@ -769,6 +899,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
"FCMask": "0x07",
@@ -780,6 +911,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
"FCMask": "0x07",
@@ -791,6 +923,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
"FCMask": "0x07",
@@ -802,6 +935,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
"FCMask": "0x07",
@@ -813,6 +947,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
"FCMask": "0x07",
@@ -824,6 +959,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
"FCMask": "0x07",
@@ -835,8 +971,10 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part0-7 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00ff",
@@ -845,6 +983,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part0 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -856,6 +995,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part1 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -867,6 +1007,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part2 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -878,6 +1019,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by IIO Part3 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -889,6 +1031,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -900,6 +1043,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -911,6 +1055,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -922,6 +1067,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -933,8 +1079,10 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part0-7 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00ff",
@@ -943,6 +1091,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part0 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -954,6 +1103,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part1 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -965,6 +1115,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part2 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -976,6 +1127,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made by IIO Part3 to Memory",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -987,6 +1139,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -998,6 +1151,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -1009,6 +1163,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -1020,6 +1175,7 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -1031,8 +1187,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -1042,8 +1200,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -1053,8 +1213,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -1064,8 +1226,10 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -1075,8 +1239,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -1086,8 +1252,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -1097,8 +1265,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -1108,8 +1278,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -1119,8 +1291,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1130,8 +1304,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1141,8 +1317,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1152,8 +1330,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1163,8 +1343,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1174,8 +1356,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1185,8 +1369,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1196,8 +1382,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1207,8 +1395,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1218,8 +1408,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1229,8 +1421,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1240,8 +1434,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1251,8 +1447,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 1G Page",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
@@ -1261,8 +1459,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 2M Page",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
@@ -1271,8 +1471,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 4K Page",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
@@ -1281,8 +1483,10 @@
},
{
"BriefDescription": ": Context cache hits",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": Context cache hits : Counts each time a first look up of the transaction hits the RCC.",
@@ -1291,8 +1495,10 @@
},
{
"BriefDescription": ": Context cache lookups",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": Context cache lookups : Counts each time a transaction looks up root context cache.",
@@ -1301,8 +1507,10 @@
},
{
"BriefDescription": ": IOTLB lookups first",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": IOTLB lookups first : Some transactions have to look up IOTLB multiple times. Counts the first time a request looks up IOTLB.",
@@ -1311,8 +1519,10 @@
},
{
"BriefDescription": "IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.MISSES",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "IOTLB Fills (same as IOTLB miss) : When a transaction misses IOTLB, it does a page walk to look up memory and bring in the relevant page translation. Counts when this page translation is written to IOTLB.",
@@ -1321,8 +1531,10 @@
},
{
"BriefDescription": ": IOMMU memory access",
+ "Counter": "0",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOMMU memory access : IOMMU sends out memory fetches when it misses the cache look up which is indicated by this signal. M2IOSF only uses low priority channel",
"UMask": "0xc0",
@@ -1330,8 +1542,10 @@
},
{
"BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
"UMask": "0x4",
@@ -1339,8 +1553,10 @@
},
{
"BriefDescription": ": PWT Hit to a 256T page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_256T_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWT Hit to a 256T page : Counts each time a transaction's first look up hits the SLPWC at the 512G level",
"UMask": "0x10",
@@ -1348,8 +1564,10 @@
},
{
"BriefDescription": ": PWC Hit to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 4K page : Counts each time a transaction's first look up hits the SLPWC at the 4K level",
"UMask": "0x2",
@@ -1357,8 +1575,10 @@
},
{
"BriefDescription": ": PWC Hit to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
"UMask": "0x8",
@@ -1366,8 +1586,10 @@
},
{
"BriefDescription": ": PageWalk cache fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PageWalk cache fill : When a transaction misses SLPWC, it does a page walk to look up memory and bring in the relevant page translation. When this page translation is written to SLPWC, ObsPwcFillValid_nnnH is asserted.",
"UMask": "0x20",
@@ -1375,8 +1597,10 @@
},
{
"BriefDescription": ": PageWalk cache lookup",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PageWalk cache lookup : Counts each time a transaction looks up second level page walk cache.",
"UMask": "0x1",
@@ -1384,8 +1608,10 @@
},
{
"BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
"UMask": "0x4",
@@ -1393,8 +1619,10 @@
},
{
"BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
"UMask": "0x10",
@@ -1402,8 +1630,10 @@
},
{
"BriefDescription": ": PWC Hit to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
"UMask": "0x8",
@@ -1411,8 +1641,10 @@
},
{
"BriefDescription": ": Global IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.PWT_OCCUPANCY_MSB",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": ": Global IOTLB invalidation cycles : Indicates that IOMMU is doing global invalidation.",
@@ -1421,8 +1653,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus : Asserted if all bits specified by mask match",
@@ -1431,8 +1665,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if all bits specified by mask match",
@@ -1441,8 +1677,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if all bits specified by mask match",
@@ -1451,8 +1689,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : PCIE bus : Asserted if all bits specified by mask match",
@@ -1461,8 +1701,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if all bits specified by mask match",
@@ -1471,8 +1713,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if all bits specified by mask match",
@@ -1481,8 +1725,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus : Asserted if any bits specified by mask match",
@@ -1491,8 +1737,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if any bits specified by mask match",
@@ -1501,8 +1749,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if any bits specified by mask match",
@@ -1511,8 +1761,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : PCIE bus : Asserted if any bits specified by mask match",
@@ -1521,8 +1773,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if any bits specified by mask match",
@@ -1531,8 +1785,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if any bits specified by mask match",
@@ -1541,6 +1797,7 @@
},
{
"BriefDescription": "Number requests PCIe makes of the main die : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
"FCMask": "0x07",
@@ -1552,8 +1809,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1562,8 +1821,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1572,8 +1833,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1582,8 +1845,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1592,8 +1857,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1602,8 +1869,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1612,8 +1881,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1622,8 +1893,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1632,8 +1905,10 @@
},
{
"BriefDescription": "ITC address map 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8f",
"EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
@@ -1641,8 +1916,10 @@
},
{
"BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1652,8 +1929,10 @@
},
{
"BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1663,8 +1942,10 @@
},
{
"BriefDescription": "PWT occupancy. Does not include 9th bit of occupancy (will undercount if PWT is greater than 255 per cycle).",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x0000",
"PublicDescription": "PWT occupancy : Indicates how many page walks are outstanding at any point in time.",
@@ -1673,8 +1954,10 @@
},
{
"BriefDescription": "Request Ownership : PCIe Request complete",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1684,8 +1967,10 @@
},
{
"BriefDescription": "Request Ownership : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1695,8 +1980,10 @@
},
{
"BriefDescription": "Request Ownership : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1706,8 +1993,10 @@
},
{
"BriefDescription": "Request Ownership : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1717,8 +2006,10 @@
},
{
"BriefDescription": "Processing response from IOMMU : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1728,8 +2019,10 @@
},
{
"BriefDescription": "Processing response from IOMMU : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1738,8 +2031,10 @@
},
{
"BriefDescription": "Processing response from IOMMU : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1749,8 +2044,10 @@
},
{
"BriefDescription": "Processing response from IOMMU : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1760,8 +2057,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1771,8 +2070,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1782,8 +2083,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1793,8 +2096,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x00FF",
@@ -1804,6 +2109,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -1815,6 +2121,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -1826,6 +2133,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -1837,6 +2145,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -1848,6 +2157,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -1859,6 +2169,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -1870,6 +2181,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -1881,6 +2193,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -1892,6 +2205,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -1903,6 +2217,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -1914,6 +2229,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -1925,6 +2241,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -1936,6 +2253,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -1947,6 +2265,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -1958,6 +2277,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -1969,6 +2289,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -1980,8 +2301,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -1991,8 +2314,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -2002,8 +2327,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -2013,8 +2340,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -2024,8 +2353,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -2035,8 +2366,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -2046,8 +2379,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -2057,8 +2392,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -2068,6 +2405,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
"FCMask": "0x07",
@@ -2079,6 +2417,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
"FCMask": "0x07",
@@ -2090,6 +2429,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
"FCMask": "0x07",
@@ -2101,6 +2441,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
"FCMask": "0x07",
@@ -2112,6 +2453,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
"FCMask": "0x07",
@@ -2123,6 +2465,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
"FCMask": "0x07",
@@ -2134,6 +2477,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
"FCMask": "0x07",
@@ -2145,6 +2489,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
"FCMask": "0x07",
@@ -2156,6 +2501,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -2167,6 +2513,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -2178,6 +2525,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -2189,6 +2537,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -2200,6 +2549,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -2211,6 +2561,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -2222,6 +2573,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -2233,6 +2585,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -2244,6 +2597,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -2255,6 +2609,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -2266,6 +2621,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -2277,6 +2633,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -2288,6 +2645,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -2299,6 +2657,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -2310,6 +2669,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -2321,6 +2681,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -2332,8 +2693,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0001",
@@ -2343,8 +2706,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0002",
@@ -2354,8 +2719,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0004",
@@ -2365,8 +2732,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0008",
@@ -2376,8 +2745,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0010",
@@ -2387,8 +2758,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0020",
@@ -2398,8 +2771,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0040",
@@ -2409,8 +2784,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0080",
@@ -2420,6 +2797,7 @@
},
{
"BriefDescription": "M2P Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2P_CLOCKTICKS",
"PerPkg": "1",
@@ -2428,6 +2806,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2P_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -2435,8 +2814,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -2444,8 +2825,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -2453,8 +2836,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x1",
@@ -2462,8 +2847,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x2",
@@ -2471,8 +2858,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x4",
@@ -2480,8 +2869,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x8",
@@ -2489,8 +2880,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
"UMask": "0x10",
@@ -2498,8 +2891,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -2507,8 +2902,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the DRS message class.",
"UMask": "0x8",
@@ -2516,8 +2913,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCB message class.",
"UMask": "0x10",
@@ -2525,8 +2924,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -2534,8 +2935,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x1",
@@ -2543,8 +2946,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x2",
@@ -2552,8 +2957,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x4",
@@ -2561,8 +2968,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x8",
@@ -2570,8 +2979,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
"UMask": "0x10",
@@ -2579,8 +2990,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -2588,896 +3001,1120 @@
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : All",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_DRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x80",
@@ -3485,8 +4122,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x1",
@@ -3494,8 +4133,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x2",
@@ -3503,8 +4144,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x4",
@@ -3512,8 +4155,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x20",
@@ -3521,8 +4166,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x40",
@@ -3530,8 +4177,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x8",
@@ -3539,8 +4188,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x10",
@@ -3548,8 +4199,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x80",
@@ -3557,8 +4210,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x1",
@@ -3566,8 +4221,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x2",
@@ -3575,8 +4232,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x4",
@@ -3584,8 +4243,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x20",
@@ -3593,8 +4254,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x40",
@@ -3602,8 +4265,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.UPI_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x8",
@@ -3611,8 +4276,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.UPI_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x10",
@@ -3620,24 +4287,30 @@
},
{
"BriefDescription": "UNC_M2P_TxC_CREDITS.PMM",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_M2P_TxC_CREDITS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_0",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3647,8 +4320,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_1",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3658,8 +4333,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_0",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3669,8 +4346,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_1",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
index 3ff9e9b722c8..aa06088dd26f 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles - at UCLK",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2HBM_CLOCKTICKS",
"PerPkg": "1",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2HBM_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -15,16 +17,20 @@
},
{
"BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2HBM"
},
{
"BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of time non cisgress D2C was not honoured by egress due to directory state constraints",
"UMask": "0x2",
@@ -32,47 +38,59 @@
},
{
"BriefDescription": "Counts the time when FM didn't do d2c for fill reads (cross tile case)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2HBM_DIRECT2CORE_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number of reads in which direct to core transaction was overridden : Cisgress",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2HBM_DIRECT2CORE_TXN_OVERRIDE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2HBM"
},
{
"BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2HBM"
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.CISGRESS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -82,8 +100,10 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -93,8 +113,10 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -104,86 +126,107 @@
},
{
"BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_M2HBM_DIRECT2UPI_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1c",
"EventName": "UNC_M2HBM_DIRECT2UPI_TXN_OVERRIDE.CISGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -193,6 +236,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -202,6 +246,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -211,6 +256,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -220,86 +266,107 @@
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x320",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x340",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -308,8 +375,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_I_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -319,8 +388,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_I_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -330,8 +401,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_S_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -341,8 +414,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_S_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -352,8 +427,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -363,24 +440,30 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x304",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x302",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_A_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -390,8 +473,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_A_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -401,8 +486,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_S_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -412,8 +499,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_S_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -423,8 +512,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -434,24 +525,30 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "M2HBM"
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_A_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -461,8 +558,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_A_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -472,8 +571,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_I_HIT_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -483,8 +584,10 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory Updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_I_MISS_NON_PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -494,64 +597,80 @@
},
{
"BriefDescription": "Count distress signalled on AkAd cmp message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on any packet type",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on Bl Cmp message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.BL_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on NM fill write message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.CROSSTILE_NMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on D2Cha message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.D2CHA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on D2c message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.D2CORE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Count distress signalled on D2k message",
+ "Counter": "0,1,2,3",
"EventCode": "0x67",
"EventName": "UNC_M2HBM_DISTRESS.D2UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2HBM_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x80000004",
@@ -559,8 +678,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2HBM_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x80000001",
@@ -568,8 +689,10 @@
},
{
"BriefDescription": "Count when Starve Glocab counter is at 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2HBM_IGR_STARVE_WINNER.MASK7",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -578,32 +701,40 @@
},
{
"BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x304",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0.ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x104",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0.NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x101",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -612,24 +743,30 @@
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0_FROM_TGR",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x140",
"Unit": "M2HBM"
},
{
"BriefDescription": "Critical Priority - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x102",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH0_NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH0_NORMAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -638,24 +775,30 @@
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH1.ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x204",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH1.NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x201",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH1_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -664,24 +807,30 @@
},
{
"BriefDescription": "From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x240",
"Unit": "M2HBM"
},
{
"BriefDescription": "Critical Priority - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x202",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.CH1_NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.CH1_NORMAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -690,64 +839,80 @@
},
{
"BriefDescription": "From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x340",
"Unit": "M2HBM"
},
{
"BriefDescription": "Critical Priority - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x302",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_READS.NORMAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2HBM_IMC_READS.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x301",
"Unit": "M2HBM"
},
{
"BriefDescription": "All Writes - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1810",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x810",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.FULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x801",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.PARTIAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x802",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_ALL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -756,15 +921,19 @@
},
{
"BriefDescription": "From TGR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_FULL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_FULL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -773,16 +942,20 @@
},
{
"BriefDescription": "ISOCH Full Line - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x804",
"Unit": "M2HBM"
},
{
"BriefDescription": "Non-Inclusive - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_NI",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -790,8 +963,10 @@
},
{
"BriefDescription": "Non-Inclusive Miss - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_NI_MISS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -799,8 +974,10 @@
},
{
"BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -809,40 +986,50 @@
},
{
"BriefDescription": "ISOCH Partial - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x808",
"Unit": "M2HBM"
},
{
"BriefDescription": "All Writes - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1010",
"Unit": "M2HBM"
},
{
"BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1001",
"Unit": "M2HBM"
},
{
"BriefDescription": "Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1002",
"Unit": "M2HBM"
},
{
"BriefDescription": "All Writes - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_ALL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -851,15 +1038,19 @@
},
{
"BriefDescription": "From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_FULL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -868,16 +1059,20 @@
},
{
"BriefDescription": "ISOCH Full Line - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1004",
"Unit": "M2HBM"
},
{
"BriefDescription": "Non-Inclusive - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_NI",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -885,8 +1080,10 @@
},
{
"BriefDescription": "Non-Inclusive Miss - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_NI_MISS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -894,8 +1091,10 @@
},
{
"BriefDescription": "Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_PARTIAL",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -904,39 +1103,49 @@
},
{
"BriefDescription": "ISOCH Partial - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1008",
"Unit": "M2HBM"
},
{
"BriefDescription": "From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Full Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1801",
"Unit": "M2HBM"
},
{
"BriefDescription": "ISOCH Full Line - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1804",
"Unit": "M2HBM"
},
{
"BriefDescription": "Non-Inclusive - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.NI",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -944,8 +1153,10 @@
},
{
"BriefDescription": "Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.NI_MISS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -953,159 +1164,199 @@
},
{
"BriefDescription": "Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1802",
"Unit": "M2HBM"
},
{
"BriefDescription": "ISOCH Partial - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2HBM_IMC_WRITES.PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1808",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_CIS_DROPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5c",
"EventName": "UNC_M2HBM_PREFCAM_CIS_DROPS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2HBM"
},
{
"BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "M2HBM"
},
{
"BriefDescription": ": UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2HBM"
},
{
"BriefDescription": ": XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "M2HBM"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.RD_MERGED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2HBM"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.WR_MERGED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2HBM"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x5e",
"EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.WR_SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH1_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.UPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2HBM_PREFCAM_INSERTS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Prefetch CAM Inserts : XPT -All Channels",
"UMask": "0x5",
@@ -1113,80 +1364,100 @@
},
{
"BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": ": Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": ": Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5f",
"EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.CIS",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.CIS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "UNC_M2HBM_PREFCAM_RxC_OCCUPANCY",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2HBM_PREFCAM_RxC_OCCUPANCY",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1194,8 +1465,10 @@
},
{
"BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M2HBM_RxC_AD.INSERTS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1204,23 +1477,29 @@
},
{
"BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M2HBM_RxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M2HBM_RxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "BL Ingress (from CMS) : BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M2HBM_RxC_BL.INSERTS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1230,8 +1509,10 @@
},
{
"BriefDescription": "BL Ingress (from CMS) : BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M2HBM_RxC_BL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts anytime a BL packet is added to Ingress",
"UMask": "0x1",
@@ -1239,61 +1520,77 @@
},
{
"BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M2HBM_RxC_BL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "UNC_M2HBM_TGR_AD_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_M2HBM_TGR_BL_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2HBM_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x104",
"Unit": "M2HBM"
},
{
"BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2HBM_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x204",
"Unit": "M2HBM"
},
{
"BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2HBM_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2HBM_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "AD Egress (to CMS) : AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M2HBM_TxC_AD.INSERTS",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1303,8 +1600,10 @@
},
{
"BriefDescription": "AD Egress (to CMS) : AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M2HBM_TxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts anytime a AD packet is added to Egress",
"UMask": "0x1",
@@ -1312,15 +1611,19 @@
},
{
"BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "UNC_M2HBM_TxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2HBM"
},
{
"BriefDescription": "BL Egress (to CMS) : Inserts - CMS0 - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UNC_M2HBM_TxC_BL.INSERTS_CMS0",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1330,8 +1633,10 @@
},
{
"BriefDescription": "BL Egress (to CMS) : Inserts - CMS1 - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UNC_M2HBM_TxC_BL.INSERTS_CMS1",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -1341,160 +1646,200 @@
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x0f",
"EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2HBM"
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x0f",
"EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x0f",
"EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "WPQ Flush : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2HBM_WPQ_FLUSH.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "WPQ Flush : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2HBM_WPQ_FLUSH.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2HBM_WPQ_NO_REG_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2HBM_WPQ_NO_REG_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2HBM_WPQ_NO_SPEC_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2HBM_WPQ_NO_SPEC_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2HBM_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2HBM_WR_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4d",
"EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2HBM_WR_TRACKER_POSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2HBM_WR_TRACKER_POSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2HBM_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2HBM"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2HBM_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2HBM"
},
{
"BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0xff",
@@ -1502,8 +1847,10 @@
},
{
"BriefDescription": "Activate due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x11",
@@ -1511,8 +1858,10 @@
},
{
"BriefDescription": "HBM Activate Count : Activate due to Read in PCH0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.RD_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x1",
@@ -1520,8 +1869,10 @@
},
{
"BriefDescription": "HBM Activate Count : Activate due to Read in PCH1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.RD_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x10",
@@ -1529,8 +1880,10 @@
},
{
"BriefDescription": "HBM Activate Count : Underfill Read transaction on Page Empty or Page Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x44",
@@ -1538,8 +1891,10 @@
},
{
"BriefDescription": "HBM Activate Count",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.UFILL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x4",
@@ -1547,8 +1902,10 @@
},
{
"BriefDescription": "HBM Activate Count",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.UFILL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x40",
@@ -1556,8 +1913,10 @@
},
{
"BriefDescription": "Activate due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x22",
@@ -1565,8 +1924,10 @@
},
{
"BriefDescription": "HBM Activate Count : Activate due to Write in PCH0",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.WR_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x2",
@@ -1574,8 +1935,10 @@
},
{
"BriefDescription": "HBM Activate Count : Activate due to Write in PCH1",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_MCHBM_ACT_COUNT.WR_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x20",
@@ -1583,16 +1946,20 @@
},
{
"BriefDescription": "All CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xff",
"Unit": "MCHBM"
},
{
"BriefDescription": "Pseudo Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "HBM RD_CAS and WR_CAS Commands",
"UMask": "0x40",
@@ -1600,8 +1967,10 @@
},
{
"BriefDescription": "Pseudo Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "HBM RD_CAS and WR_CAS Commands",
"UMask": "0x80",
@@ -1609,134 +1978,167 @@
},
{
"BriefDescription": "Read CAS commands issued (regular and underfill)",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xcf",
"Unit": "MCHBM"
},
{
"BriefDescription": "Regular read CAS commands with precharge",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD_PRE_REG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc2",
"Unit": "MCHBM"
},
{
"BriefDescription": "Underfill read CAS commands with precharge",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD_PRE_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8",
"Unit": "MCHBM"
},
{
"BriefDescription": "Regular read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD_REG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.RD_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc4",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf0",
"Unit": "MCHBM"
},
{
"BriefDescription": "HBM RD_CAS and WR_CAS Commands. : HBM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write CAS commands with precharge",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_MCHBM_CAS_COUNT.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "MCHBM"
},
{
"BriefDescription": "Pseudo Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "MCHBM"
},
{
"BriefDescription": "Pseudo Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Underfill Read CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_UFILL_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "MCHBM"
},
{
"BriefDescription": "Underfill Read CAS Command in Regular Mode (64B) in Pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_UFILL_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc2",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.WR_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.WR_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc4",
"Unit": "MCHBM"
},
{
"BriefDescription": "IMC Clockticks at DCLK frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_MCHBM_CLOCKTICKS",
"PerPkg": "1",
@@ -1745,8 +2147,10 @@
},
{
"BriefDescription": "HBM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_MCHBM_HBM_PREALL.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that the precharge all command was sent.",
"UMask": "0x1",
@@ -1754,8 +2158,10 @@
},
{
"BriefDescription": "HBM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_MCHBM_HBM_PREALL.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that the precharge all command was sent.",
"UMask": "0x2",
@@ -1763,8 +2169,10 @@
},
{
"BriefDescription": "All Precharge Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_MCHBM_HBM_PRE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Precharge All Commands: Counts the number of times that the precharge all command was sent.",
"UMask": "0x3",
@@ -1772,15 +2180,19 @@
},
{
"BriefDescription": "IMC Clockticks at HCLK frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_MCHBM_HCLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "MCHBM"
},
{
"BriefDescription": "All precharge events",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0xff",
@@ -1788,8 +2200,10 @@
},
{
"BriefDescription": "Precharge from MC page table",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.PGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x88",
@@ -1797,8 +2211,10 @@
},
{
"BriefDescription": "HBM Precharge commands. : Precharges from Page Table",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.PGT_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Equivalent to PAGE_EMPTY",
"UMask": "0x8",
@@ -1806,8 +2222,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.PGT_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x80",
@@ -1815,8 +2233,10 @@
},
{
"BriefDescription": "Precharge due to read on page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x11",
@@ -1824,8 +2244,10 @@
},
{
"BriefDescription": "HBM Precharge commands. : Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.RD_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Precharge from read bank scheduler",
"UMask": "0x1",
@@ -1833,8 +2255,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.RD_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x10",
@@ -1842,8 +2266,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x44",
@@ -1851,8 +2277,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.UFILL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x4",
@@ -1860,8 +2288,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.UFILL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x40",
@@ -1869,8 +2299,10 @@
},
{
"BriefDescription": "Precharge due to write on page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x22",
@@ -1878,8 +2310,10 @@
},
{
"BriefDescription": "HBM Precharge commands. : Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.WR_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Precharge from write bank scheduler",
"UMask": "0x2",
@@ -1887,8 +2321,10 @@
},
{
"BriefDescription": "HBM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_MCHBM_PRE_COUNT.WR_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
"UMask": "0x20",
@@ -1896,46 +2332,58 @@
},
{
"BriefDescription": "Counts the number of cycles where the read buffer has greater than UMASK elements. NOTE: Umask must be set to the maximum number of elements in the queue (24 entries for SPR).",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_MCHBM_RDB_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Counts the number of inserts into the read buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_MCHBM_RDB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_MCHBM_RDB_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_MCHBM_RDB_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "MCHBM"
},
{
"BriefDescription": "Counts the number of elements in the read buffer per cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_MCHBM_RDB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_MCHBM_RPQ_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Allocations: Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
"UMask": "0x1",
@@ -1943,8 +2391,10 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_MCHBM_RPQ_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Allocations: Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
"UMask": "0x2",
@@ -1952,24 +2402,30 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_MCHBM_RPQ_OCCUPANCY_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Occupancy: Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
"Unit": "MCHBM"
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_MCHBM_RPQ_OCCUPANCY_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Occupancy: Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_MCHBM_WPQ_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Allocations: Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
"UMask": "0x1",
@@ -1977,8 +2433,10 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_MCHBM_WPQ_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Allocations: Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
"UMask": "0x2",
@@ -1986,24 +2444,30 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_MCHBM_WPQ_OCCUPANCY_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Occupancy: Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to memory. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_MCHBM_WPQ_OCCUPANCY_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Occupancy: Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to memory. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
"Unit": "MCHBM"
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_MCHBM_WPQ_READ_HIT",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -2012,8 +2476,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_MCHBM_WPQ_READ_HIT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x1",
@@ -2021,8 +2487,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_MCHBM_WPQ_READ_HIT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x2",
@@ -2030,8 +2498,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_MCHBM_WPQ_WRITE_HIT",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -2040,8 +2510,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_MCHBM_WPQ_WRITE_HIT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x1",
@@ -2049,8 +2521,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_MCHBM_WPQ_WRITE_HIT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x2",
@@ -2058,6 +2532,7 @@
},
{
"BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.ALL",
"PerPkg": "1",
@@ -2067,6 +2542,7 @@
},
{
"BriefDescription": "All DRAM CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -2076,8 +2552,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0 : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x40",
@@ -2085,8 +2563,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1 : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x80",
@@ -2094,6 +2574,7 @@
},
{
"BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -2103,8 +2584,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc2",
@@ -2112,8 +2595,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc8",
@@ -2121,8 +2606,10 @@
},
{
"BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/out auto-pre : DRAM RD_CAS and WR_CAS Commands : Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
"UMask": "0xc1",
@@ -2130,8 +2617,10 @@
},
{
"BriefDescription": "DRAM underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Underfill Read Issued : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc4",
@@ -2139,6 +2628,7 @@
},
{
"BriefDescription": "All DRAM write CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -2148,8 +2638,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xd0",
@@ -2157,8 +2649,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xe0",
@@ -2166,70 +2660,87 @@
},
{
"BriefDescription": "Pseudo Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Pseudo Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc8",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc1",
"Unit": "iMC"
},
{
"BriefDescription": "Underfill Read CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_UFILL_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "iMC"
},
{
"BriefDescription": "Underfill Read CAS Command in Regular Mode (64B) in Pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_UFILL_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc2",
"Unit": "iMC"
},
{
"BriefDescription": "Write CAS Command in Interleaved Mode (32B)",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.WR_32B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "iMC"
},
{
"BriefDescription": "Write CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_ISSUED_REQ_LEN.WR_64B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc4",
"Unit": "iMC"
},
{
"BriefDescription": "IMC Clockticks at DCLK frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
@@ -2239,8 +2750,10 @@
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M_DRAM_PRE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge All Commands : Counts the number of times that the precharge all command was sent.",
"UMask": "0x3",
@@ -2248,6 +2761,7 @@
},
{
"BriefDescription": "IMC Clockticks at HCLK frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_HCLOCKTICKS",
"PerPkg": "1",
@@ -2256,30 +2770,37 @@
},
{
"BriefDescription": "UNC_M_PCLS.RD",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_PCLS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.TOTAL",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_PCLS.TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.WR",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M_PCLS.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Read Pending Queue inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M_PMM_RPQ_INSERTS",
"PerPkg": "1",
@@ -2288,6 +2809,7 @@
},
{
"BriefDescription": "PMM Read Pending Queue occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH0",
"PerPkg": "1",
@@ -2297,6 +2819,7 @@
},
{
"BriefDescription": "PMM Read Pending Queue occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH1",
"PerPkg": "1",
@@ -2306,8 +2829,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x10",
@@ -2315,8 +2840,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x20",
@@ -2324,8 +2851,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x4",
@@ -2333,8 +2862,10 @@
},
{
"BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
"UMask": "0x8",
@@ -2342,13 +2873,16 @@
},
{
"BriefDescription": "PMM (for IXP) Write Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "PMM Write Pending Queue inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0xe7",
"EventName": "UNC_M_PMM_WPQ_INSERTS",
"PerPkg": "1",
@@ -2357,6 +2891,7 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -2366,6 +2901,7 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH0",
"PerPkg": "1",
@@ -2375,6 +2911,7 @@
},
{
"BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH1",
"PerPkg": "1",
@@ -2384,8 +2921,10 @@
},
{
"BriefDescription": "PMM (for IXP) Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM (for IXP) Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the IXP DIMM.",
"UMask": "0xc",
@@ -2393,8 +2932,10 @@
},
{
"BriefDescription": "PMM (for IXP) Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PMM (for IXP) Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the IXP DIMM.",
"UMask": "0x30",
@@ -2402,16 +2943,20 @@
},
{
"BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Channel PPD Cycles : Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
"Unit": "iMC"
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x1",
@@ -2419,8 +2964,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x2",
@@ -2428,8 +2975,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x4",
@@ -2437,8 +2986,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x8",
@@ -2446,8 +2997,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
"UMask": "0x1",
@@ -2455,8 +3008,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
@@ -2464,14 +3019,17 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Clock-Enabled Self-Refresh : Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
"Unit": "iMC"
},
{
"BriefDescription": "Precharge due to read, write, underfill, or PGT.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.ALL",
"PerPkg": "1",
@@ -2481,6 +3039,7 @@
},
{
"BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.PGT",
"PerPkg": "1",
@@ -2490,8 +3049,10 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharges from Page Table",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.PGT_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Precharges from Page Table : Counts the number of DRAM Precharge commands sent on this channel. : Equivalent to PAGE_EMPTY",
"UMask": "0x8",
@@ -2499,8 +3060,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.PGT_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x80",
@@ -2508,6 +3071,7 @@
},
{
"BriefDescription": "Precharge due to read on page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -2517,8 +3081,10 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.RD_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Precharge due to read : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from read bank scheduler",
"UMask": "0x1",
@@ -2526,8 +3092,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.RD_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x10",
@@ -2535,8 +3103,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x44",
@@ -2544,8 +3114,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.UFILL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x4",
@@ -2553,8 +3125,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.UFILL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x40",
@@ -2562,6 +3136,7 @@
},
{
"BriefDescription": "Precharge due to write on page miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
@@ -2571,8 +3146,10 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.WR_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Precharge due to write : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from write bank scheduler",
"UMask": "0x2",
@@ -2580,8 +3157,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.WR_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x20",
@@ -2589,21 +3168,26 @@
},
{
"BriefDescription": "Counts the number of cycles where the read buffer has greater than UMASK elements. This includes reads to both DDR and PMEM. NOTE: Umask must be set to the maximum number of elements in the queue (24 entries for SPR).",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M_RDB_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Counts the number of inserts into the read buffer destined for DDR. Does not count reads destined for PMEM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS.PCH0",
"PerPkg": "1",
@@ -2612,6 +3196,7 @@
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS.PCH1",
"PerPkg": "1",
@@ -2620,45 +3205,56 @@
},
{
"BriefDescription": "Counts the number of cycles where there's at least one element in the read buffer. This includes reads to both DDR and PMEM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NE.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NE.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Counts the number of cycles where there's at least one element in the read buffer. This includes reads to both DDR and PMEM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NOT_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "Counts the number of elements in the read buffer, including reads to both DDR and PMEM.",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M_RDB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH0",
"PerPkg": "1",
@@ -2668,6 +3264,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH1",
"PerPkg": "1",
@@ -2677,6 +3274,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
"PerPkg": "1",
@@ -2685,6 +3283,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
"PerPkg": "1",
@@ -2693,294 +3292,368 @@
},
{
"BriefDescription": "Scoreboard accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Write Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Write Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : FM read completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : FM write completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Read Accepts",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : Read Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : NM read completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Accesses : NM write completions",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Alloc",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.ALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Dealloc",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FM_RD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.FM_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": ": Valid",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.NM_RD_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read Starved",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.NM_WR_STARVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Reject",
+ "Counter": "0,1,2,3",
"EventCode": "0xd9",
"EventName": "UNC_M_SB_CANARY.VLD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_M_SB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Cycles Not-Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M_SB_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Block region reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Block region writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Persistent Mem reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Persistent Mem writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Inserts : Writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M_SB_INSERTS.WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Block region reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Block region writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Persistent Mem reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Persistent Mem writes",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Occupancy : Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : All",
+ "Counter": "0,1,2,3",
"EventCode": "0xda",
"EventName": "UNC_M_SB_PREF_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : DDR4",
+ "Counter": "0,1,2,3",
"EventCode": "0xda",
"EventName": "UNC_M_SB_PREF_INSERTS.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Inserts : PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0xda",
"EventName": "UNC_M_SB_PREF_INSERTS.PMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : All",
+ "Counter": "0,1,2,3",
"EventCode": "0xdb",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : DDR4",
+ "Counter": "0,1,2,3",
"EventCode": "0xdb",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Scoreboard Prefetch Occupancy : Persistent Mem",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -2989,230 +3662,287 @@
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.CANARY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.DDR_EARLY_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : FM requests rejected due to full address conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : NM requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Number of Scoreboard Requests Rejected : Patrol requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M_SB_STRV_ALLOC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write - Set",
+ "Counter": "0,1,2,3",
"EventCode": "0xde",
"EventName": "UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Read",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": ": Far Mem Write",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.FM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Read",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.NM_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": ": Near Mem Write",
+ "Counter": "0,1,2,3",
"EventCode": "0xd8",
"EventName": "UNC_M_SB_STRV_OCC.NM_WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.NEW",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.OCC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xdd",
"EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "2LM Tag check hit in near memory cache (DDR4)",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.HIT",
"PerPkg": "1",
@@ -3221,6 +3951,7 @@
},
{
"BriefDescription": "2LM Tag check miss, no data at this line",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.MISS_CLEAN",
"PerPkg": "1",
@@ -3229,6 +3960,7 @@
},
{
"BriefDescription": "2LM Tag check miss, existing data may be evicted to PMM",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.MISS_DIRTY",
"PerPkg": "1",
@@ -3237,6 +3969,7 @@
},
{
"BriefDescription": "2LM Tag check hit due to memory read",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.NM_RD_HIT",
"PerPkg": "1",
@@ -3245,6 +3978,7 @@
},
{
"BriefDescription": "2LM Tag check hit due to memory write",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M_TAGCHK.NM_WR_HIT",
"PerPkg": "1",
@@ -3253,6 +3987,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS.PCH0",
"PerPkg": "1",
@@ -3262,6 +3997,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS.PCH1",
"PerPkg": "1",
@@ -3271,6 +4007,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
"PerPkg": "1",
@@ -3279,6 +4016,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
"PerPkg": "1",
@@ -3287,8 +4025,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
@@ -3297,8 +4037,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
+ "Experimental": "1",
"FCMask": "0x00000000",
"PerPkg": "1",
"PortMask": "0x00000000",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
index 8948e85074f0..9482ddaea4d1 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "PCU PCLK Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
@@ -9,187 +10,235 @@
},
{
"BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 0 Cycles : Cycles spent in phase-shedding power state 0",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 1 Cycles : Cycles spent in phase-shedding power state 1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 2 Cycles : Cycles spent in phase-shedding power state 2",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 3 Cycles : Cycles spent in phase-shedding power state 3",
"Unit": "PCU"
},
{
"BriefDescription": "AVX256 Frequency Clipping",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "AVX512 Frequency Clipping",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.",
"Unit": "PCU"
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IO P Limit Strongest Lower Limit Cycles : Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Memory Phase Shedding Cycles : Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2a",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C0 : Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2b",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2d",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0",
"EventCode": "0x06",
"EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C0 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x36",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C6 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x0a",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x09",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total Core C State Transition Cycles : Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
"Unit": "PCU"
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
index a1e3b8d2ebe7..609a9549cbf3 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
index f937ba0e50e1..04802e254e51 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
@@ -1,22 +1,25 @@
[
{
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
{
"BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x4f"
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an instruction cache or TLB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.ALL",
"SampleAfterValue": "1000003",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.L2_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which hit in the LLC.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT",
"SampleAfterValue": "1000003",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS",
"SampleAfterValue": "1000003",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.ALL",
"SampleAfterValue": "1000003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.L2_HIT",
"PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 cache.",
@@ -61,6 +69,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT",
"SampleAfterValue": "1000003",
@@ -68,6 +77,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS",
"SampleAfterValue": "1000003",
@@ -75,62 +85,63 @@
},
{
"BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of load ops retired that miss in the L1 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x40"
},
{
"BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of load ops retired that miss in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
{
"BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x1c"
},
{
"BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x20"
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ALL",
"SampleAfterValue": "20003",
@@ -138,6 +149,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
"SampleAfterValue": "20003",
@@ -145,6 +157,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.RSV",
"SampleAfterValue": "20003",
@@ -152,6 +165,7 @@
},
{
"BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x04",
"EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
"SampleAfterValue": "20003",
@@ -159,179 +173,210 @@
},
{
"BriefDescription": "Counts the number of load ops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x81"
},
{
"BriefDescription": "Counts the number of store ops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x82"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
"MSRIndex": "0x3F6",
"MSRValue": "0x400",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
"MSRIndex": "0x3F6",
"MSRValue": "0x800",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
{
"BriefDescription": "Counts the number of load uops retired that performed one or more locks",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x21"
},
{
"BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x43"
},
{
"BriefDescription": "Counts the number of retired split load uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
{
"BriefDescription": "Counts the number of retired split store uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x42"
},
{
"BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
- "PEBS": "2",
"SampleAfterValue": "1000003",
"UMask": "0x6"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/counter.json b/tools/perf/pmu-events/arch/x86/sierraforest/counter.json
new file mode 100644
index 000000000000..e57e3bf98b2a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/counter.json
@@ -0,0 +1,77 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "B2CMI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "B2HOT",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "B2UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "B2CXL",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "CHACMS",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "MDF",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ },
+ {
+ "Unit": "CXLCM",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 8
+ },
+ {
+ "Unit": "CXLDP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": 4
+ }
+]
\ No newline at end of file
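
(Aside, not part of the patch: the per-unit counter.json added above is what lets tooling sanity-check the new per-event "Counter" fields. A minimal sketch of such a check, assuming the usual tools/perf/pmu-events source-tree layout and written only for illustration, could look like this:

    #!/usr/bin/env python3
    # Illustrative sketch only (not part of the patch): check that each event's
    # "Counter" list stays within the generic counters its unit advertises in
    # counter.json. The directory path is an assumption for the example.
    import glob
    import json
    import os

    ARCH_DIR = "tools/perf/pmu-events/arch/x86/sierraforest"  # assumed layout

    with open(os.path.join(ARCH_DIR, "counter.json")) as f:
        limits = {u["Unit"].lower(): int(u["CountersNumGeneric"]) for u in json.load(f)}

    for path in sorted(glob.glob(os.path.join(ARCH_DIR, "*.json"))):
        base = os.path.basename(path)
        if base in ("counter.json", "metricgroups.json", "srf-metrics.json"):
            continue  # not per-event files
        with open(path) as f:
            events = json.load(f)
        for ev in events:
            counters = ev.get("Counter", "")
            if not counters or counters.startswith("Fixed"):
                continue  # fixed-counter events are not limited by CountersNumGeneric
            unit = ev.get("Unit", "core").lower()
            highest = max(int(c) for c in counters.split(","))
            if highest >= limits.get(unit, 8):
                print(f"{base}: {ev['EventName']} uses counter {highest}, but unit "
                      f"'{unit}' only advertises {limits.get(unit)} generic counters")

Any event naming a counter index at or above its unit's CountersNumGeneric would be flagged.)
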
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/floating-point.json b/tools/perf/pmu-events/arch/x86/sierraforest/floating-point.json
index 00c9a8ae0f53..5266eed969be 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xcd",
"EventName": "ARITH.FPDIV_ACTIVE",
@@ -9,48 +10,89 @@
},
{
"BriefDescription": "Counts the number of all types of floating point operations per uop with all default weighting",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.ALL",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x3"
},
{
"BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP64]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.DP",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of floating point operations that produce 32 bit single precision results [This event is alias to FP_FLOPS_RETIRED.SP]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.FP32",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of floating point operations that produce 64 bit double precision results [This event is alias to FP_FLOPS_RETIRED.DP]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.FP64",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "This event is deprecated. [This event is alias to FP_FLOPS_RETIRED.FP32]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc8",
"EventName": "FP_FLOPS_RETIRED.SP",
- "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of floating point retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.128B_DP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 128 bit single precision floating point. This may be SSE or AVX.128 operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.128B_SP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 256 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.256B_DP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 32bit single precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.32B_SP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 64 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.64B_DP",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
@@ -59,9 +101,9 @@
},
{
"BriefDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x8"
}
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json b/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json
index 356d36aecc81..7cdf611efb23 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -9,14 +10,15 @@
},
{
"BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x10"
},
{
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"SampleAfterValue": "200003",
@@ -24,6 +26,7 @@
},
{
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"SampleAfterValue": "200003",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
index e0ce2decc805..22d23077618e 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.ANY_AT_RET",
"SampleAfterValue": "1000003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_BOUND_AT_RET",
"SampleAfterValue": "1000003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_MISS_AT_RET",
"SampleAfterValue": "1000003",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.OTHER_AT_RET",
"PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.PGWALK_AT_RET",
"SampleAfterValue": "1000003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.ST_ADDR_AT_RET",
"SampleAfterValue": "1000003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "20003",
@@ -51,22 +58,23 @@
},
{
"BriefDescription": "Counts misaligned loads that are 4K page splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts misaligned stores that are 4K page splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -76,6 +84,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/metricgroups.json b/tools/perf/pmu-events/arch/x86/sierraforest/metricgroups.json
new file mode 100644
index 000000000000..40984c23a6c9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/metricgroups.json
@@ -0,0 +1,23 @@
+{
+ "Flops": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Ifetch": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Load_Store_Miss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Mem_Exec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Summary": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TopdownL1": "Metrics for top-down breakdown at level 1",
+ "TopdownL2": "Metrics for top-down breakdown at level 2",
+ "TopdownL3": "Metrics for top-down breakdown at level 3",
+ "load_store_bound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "tma_L1_group": "Metrics for top-down breakdown at level 1",
+ "tma_L2_group": "Metrics for top-down breakdown at level 2",
+ "tma_L3_group": "Metrics for top-down breakdown at level 3",
+ "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category",
+ "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
+ "tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category",
+ "tma_ifetch_bandwidth_group": "Metrics contributing to tma_ifetch_bandwidth category",
+ "tma_ifetch_latency_group": "Metrics contributing to tma_ifetch_latency category",
+ "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
+ "tma_resource_bound_group": "Metrics contributing to tma_resource_bound category"
+}
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/other.json b/tools/perf/pmu-events/arch/x86/sierraforest/other.json
index 70a9da7e97df..28f9a4c3ea84 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/other.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/other.json
@@ -1,15 +1,16 @@
[
{
"BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xe4",
"EventName": "LBR_INSERTS.ANY",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -19,6 +20,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -27,7 +29,18 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x75",
"EventName": "SERIALIZATION.C01_MS_SCB",
"SampleAfterValue": "200003",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
index 90292dc03d33..b67c0c89054d 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles when any of the dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_ACTIVE",
@@ -9,153 +10,157 @@
},
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PEBS": "1",
"PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
"SampleAfterValue": "200003"
},
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x7e"
},
{
"BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfe"
},
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xbf"
},
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xeb"
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
{
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf9"
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf7"
},
{
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PEBS": "1",
"PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
"SampleAfterValue": "200003"
},
{
"BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x7e"
},
{
"BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfe"
},
{
"BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xeb"
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
{
"BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
- "PEBS": "1",
"SampleAfterValue": "200003",
"UMask": "0xf7"
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"UMask": "0x3"
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
@@ -164,18 +169,21 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"SampleAfterValue": "2000003",
@@ -183,37 +191,38 @@
},
{
"BriefDescription": "Counts the number of instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
- "PEBS": "1",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"SampleAfterValue": "20003",
@@ -221,6 +230,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"SampleAfterValue": "20003",
@@ -228,6 +238,7 @@
},
{
"BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -235,6 +246,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20003",
@@ -242,14 +254,15 @@
},
{
"BriefDescription": "Counts the number of Last Branch Record (LBR) entries. Requires LBRs to be enabled and configured in IA32_LBR_CTL. [This event is alias to LBR_INSERTS.ANY]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
- "PEBS": "1",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
@@ -257,6 +270,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL_P",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL]",
@@ -264,6 +278,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Fast Nukes such as Memory Ordering Machine clears and MRN nukes",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
"SampleAfterValue": "1000003",
@@ -271,6 +286,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
"SampleAfterValue": "1000003",
@@ -278,6 +294,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
"SampleAfterValue": "1000003",
@@ -285,6 +302,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
"SampleAfterValue": "1000003",
@@ -292,12 +310,14 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to due to certain allocation restrictions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
"SampleAfterValue": "1000003",
@@ -305,12 +325,14 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL_P",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). This could be caused by RSV full or load/store buffer block.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -318,6 +340,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC and FPC RAT stalls - which can be due to the FIQ and IEC reservation station stall (integer, FP and SIMD scheduler not being able to accept another uop. )",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -325,6 +348,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to mrbl stall. A 'marble' refers to a physical register file entry, also known as the physical destination (PDST).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
"SampleAfterValue": "1000003",
@@ -332,6 +356,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to ROB full",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
"SampleAfterValue": "1000003",
@@ -339,6 +364,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
"SampleAfterValue": "1000003",
@@ -346,18 +372,21 @@
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls [This event is alias to TOPDOWN_FE_BOUND.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL_P",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
"SampleAfterValue": "1000003",
@@ -365,6 +394,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTClear",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
"SampleAfterValue": "1000003",
@@ -372,6 +402,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ms",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
"SampleAfterValue": "1000003",
@@ -379,6 +410,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stall",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
"SampleAfterValue": "1000003",
@@ -386,6 +418,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
"SampleAfterValue": "1000003",
@@ -393,6 +426,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
"SampleAfterValue": "1000003",
@@ -400,6 +434,7 @@
},
{
"BriefDescription": "This event is deprecated. [This event is alias to TOPDOWN_FE_BOUND.ITLB_MISS]",
+ "Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
@@ -408,6 +443,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to itlb miss [This event is alias to TOPDOWN_FE_BOUND.ITLB]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB_MISS",
"SampleAfterValue": "1000003",
@@ -415,6 +451,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend that do not categorize into any other common frontend stall",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
"SampleAfterValue": "1000003",
@@ -422,27 +459,29 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
{
- "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL_P]",
+ "BriefDescription": "Counts the number of consumed retirement slots. [This event is alias to TOPDOWN_RETIRING.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x72",
"EventName": "TOPDOWN_RETIRING.ALL",
- "PEBS": "1",
"SampleAfterValue": "1000003"
},
{
- "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL [This event is alias to TOPDOWN_RETIRING.ALL]",
+ "BriefDescription": "Counts the number of consumed retirement slots. [This event is alias to TOPDOWN_RETIRING.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x72",
"EventName": "TOPDOWN_RETIRING.ALL_P",
- "PEBS": "1",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
@@ -450,32 +489,32 @@
},
{
"BriefDescription": "Counts the total number of uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
- "PEBS": "1",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Counts the number of integer divide uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
"BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts the number of x87 uops retired, includes those in ms flows",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
- "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x2"
}
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json b/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json
new file mode 100644
index 000000000000..b881b1958f11
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json
@@ -0,0 +1,927 @@
+[
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per module",
+ "MetricExpr": "cstate_module@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Module_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ / 1e9",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Percentage of time spent in the active CPU power state C0",
+ "MetricExpr": "tma_info_system_cpu_utilization",
+ "MetricName": "cpu_utilization",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_2mb_large_page_load_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the Data Translation Lookaside Buffer (DTLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_load_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "dtlb_2nd_level_store_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Bandwidth observed by the integrated I/O traffic contoller (IIO) of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS * 4 / 1e6 / duration_time",
+ "MetricName": "iio_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth observed by the integrated I/O traffic controller (IIO) of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS * 4 / 1e6 / duration_time",
+ "MetricName": "iio_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_local",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from a remote CPU socket.",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_remote",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_local",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to a remote CPU socket.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_remote",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricName": "itlb_2nd_level_large_page_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricName": "itlb_2nd_level_mpi",
+ "PublicDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": "ICACHE.MISSES / INST_RETIRED.ANY",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L1_HIT / INST_RETIRED.ANY",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L2_HIT / INST_RETIRED.ANY",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "LONGEST_LAT_CACHE.REFERENCE / INST_RETIRED.ANY",
+ "MetricName": "l2_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_CRD + UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF) / INST_RETIRED.ANY",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF + UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA) / INST_RETIRED.ANY",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) in nano seconds",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT) / (UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT) * #num_packages)) * duration_time",
+ "MetricName": "llc_demand_data_read_miss_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "UNC_CHA_REQUESTS.READS_LOCAL * 64 / 1e6 / duration_time",
+ "MetricName": "llc_miss_local_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "UNC_CHA_REQUESTS.WRITES_LOCAL * 64 / 1e6 / duration_time",
+ "MetricName": "llc_miss_local_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "UNC_CHA_REQUESTS.READS_REMOTE * 64 / 1e6 / duration_time",
+ "MetricName": "llc_miss_remote_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "UNC_CHA_REQUESTS.WRITES_REMOTE * 64 / 1e6 / duration_time",
+ "MetricName": "llc_miss_remote_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Load operations retired per instruction",
+ "MetricExpr": "MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricName": "loads_retired_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "(UNC_M_CAS_COUNT_SCH0.RD + UNC_M_CAS_COUNT_SCH1.RD) * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(UNC_M_CAS_COUNT_SCH0.RD + UNC_M_CAS_COUNT_SCH1.RD + UNC_M_CAS_COUNT_SCH0.WR + UNC_M_CAS_COUNT_SCH1.WR) * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "(UNC_M_CAS_COUNT_SCH0.WR + UNC_M_CAS_COUNT_SCH1.WR) * 64 / 1e6 / duration_time",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_LOCAL) / (UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_REMOTE)",
+ "MetricName": "numa_reads_addressed_to_local_dram",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Memory reads that miss the last level cache (LLC) addressed to remote DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_REMOTE) / (UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_REMOTE)",
+ "MetricName": "numa_reads_addressed_to_remote_dram",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "Store operations retired per instruction",
+ "MetricExpr": "MEM_UOPS_RETIRED.ALL_STORES / INST_RETIRED.ANY",
+ "MetricName": "stores_retired_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions",
+ "MetricExpr": "tma_core_bound",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_allocation_restriction",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_branch_detect",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_branch_resteer",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "MetricExpr": "TOPDOWN_FE_BOUND.CISC / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_decode",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_ifetch_bandwidth",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_ifetch_latency",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation",
+ "MetricExpr": "INST_RETIRED.ANY / FP_FLOPS_RETIRED.ALL",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipflop"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_INST_RETIRED.128B_DP + FP_INST_RETIRED.128B_SP)",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_avx128"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction",
+ "MetricExpr": "INST_RETIRED.ANY / FP_INST_RETIRED.64B_DP",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_scalar_dp"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction",
+ "MetricExpr": "INST_RETIRED.ANY / FP_INST_RETIRED.32B_SP",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_scalar_sp"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
+ "MetricExpr": "tma_info_bottleneck_dtlb_miss_bound_cycles",
+ "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
+ "MetricExpr": "tma_info_bottleneck_ifetch_miss_bound_cycles",
+ "MetricGroup": "Ifetch",
+ "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
+ "MetricExpr": "tma_info_bottleneck_load_miss_bound_cycles",
+ "MetricGroup": "Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
+ "MetricExpr": "tma_info_bottleneck_mem_exec_bound_cycles",
+ "MetricGroup": "Mem_Exec",
+ "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
+ "MetricExpr": "100 * (LD_HEAD.DTLB_MISS_AT_RET + LD_HEAD.PGWALK_AT_RET) / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Cycles",
+ "MetricName": "tma_info_bottleneck_dtlb_miss_bound_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.ALL / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Cycles;Ifetch",
+ "MetricName": "tma_info_bottleneck_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.ALL / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Cycles;Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
+ "MetricExpr": "100 * LD_HEAD.ANY_AT_RET / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Cycles;Mem_Exec",
+ "MetricName": "tma_info_bottleneck_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_inst_mix_ipbranch"
+ },
+ {
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricName": "tma_info_br_inst_mix_ipcall"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricName": "tma_info_br_inst_mix_ipfarbranch"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
+ "MetricExpr": "INST_RETIRED.ANY / (BR_MISP_RETIRED.COND - BR_MISP_RETIRED.COND_TAKEN)",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_ntaken"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_taken"
+ },
+ {
+ "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_indirect"
+ },
+ {
+ "BriefDescription": "Instructions per retired return Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_ret"
+ },
+ {
+ "BriefDescription": "Instructions per retired Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_inst_mix_ipmispredict"
+ },
+ {
+ "BriefDescription": "Ratio of all branches which mispredict",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_ratio"
+ },
+ {
+ "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_to_unknown_branch_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
+ "MetricExpr": "tma_info_buffer_stalls_load_buffer_stall_cycles",
+ "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
+ "MetricExpr": "tma_info_buffer_stalls_mem_rsv_stall_cycles",
+ "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
+ "MetricExpr": "tma_info_buffer_stalls_store_buffer_stall_cycles",
+ "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.LD_BUF / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_load_buffer_stall_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.RSV / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_mem_rsv_stall_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
+ "MetricExpr": "100 * MEM_SCHEDULER_BLOCK.ST_BUF / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_buffer_stalls_store_buffer_stall_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE / INST_RETIRED.ANY",
+ "MetricName": "tma_info_core_cpi"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "FP_FLOPS_RETIRED.ALL / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_core_flopc"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "tma_info_core_ipc"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "TOPDOWN_RETIRING.ALL_P / INST_RETIRED.ANY",
+ "MetricName": "tma_info_core_upi"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
+ "MetricExpr": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l2hit",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
+ "MetricExpr": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l3hit",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_MISS / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.L2_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l2hit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l3hit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
+ "MetricExpr": "tma_info_load_miss_bound_loadmissbound_with_l2hit",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
+ "MetricExpr": "tma_info_load_miss_bound_loadmissbound_with_l3hit",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_MISS / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.L2_HIT / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l2hit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_HIT / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l3hit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a pipeline block",
+ "MetricExpr": "100 * LD_HEAD.L1_BOUND_AT_RET / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_l1_bound"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement",
+ "MetricExpr": "100 * (LD_HEAD.L1_BOUND_AT_RET + MEM_BOUND_STALLS_LOAD.ALL) / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_load_bound"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full",
+ "MetricExpr": "100 * (MEM_SCHEDULER_BLOCK.ST_BUF / MEM_SCHEDULER_BLOCK.ALL) * tma_mem_scheduler",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_store_bound"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to floating point assists",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.FP_ASSIST / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_fp_assist_pki"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to page faults",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.PAGE_FAULT / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_page_fault_pki"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to self-modifying code",
+ "MetricExpr": "1e3 * MACHINE_CLEARS.SMC / INST_RETIRED.ANY",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_smc_pki"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
+ "MetricExpr": "tma_info_mem_exec_blocks_loads_with_adressaliasing",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "tma_info_mem_exec_blocks_loads_with_storefwdblk",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
+ "MetricExpr": "100 * LD_BLOCKS.ADDRESS_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_exec_blocks_loads_with_adressaliasing",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_exec_blocks_loads_with_storefwdblk",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_l1miss",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_otherpipelineblks",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_pagewalk",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_stlbhit",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
+ "MetricExpr": "tma_info_mem_exec_bound_loadhead_with_storefwding",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
+ "MetricExpr": "100 * LD_HEAD.L1_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_l1miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
+ "MetricExpr": "100 * LD_HEAD.OTHER_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_otherpipelineblks",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
+ "MetricExpr": "100 * LD_HEAD.PGWALK_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_pagewalk",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
+ "MetricExpr": "100 * LD_HEAD.DTLB_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_stlbhit",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
+ "MetricExpr": "100 * LD_HEAD.ST_ADDR_AT_RET / LD_HEAD.ANY_AT_RET",
+ "MetricName": "tma_info_mem_exec_bound_loadhead_with_storefwding",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Instructions per Load",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_ipload"
+ },
+ {
+ "BriefDescription": "Instructions per Store",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricName": "tma_info_mem_mix_ipstore"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that perform one or more locks",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_load_locks_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that are splits",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.SPLIT_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_mem_mix_load_splits_ratio"
+ },
+ {
+ "BriefDescription": "Ratio of mem load uops to all uops",
+ "MetricExpr": "1e3 * MEM_UOPS_RETIRED.ALL_LOADS / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_mem_mix_memload_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
+ "MetricExpr": "tma_info_serialization_tpause_cycles",
+ "MetricName": "tma_info_serialization _%_tpause_cycles"
+ },
+ {
+ "BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
+ "MetricExpr": "100 * SERIALIZATION.C01_MS_SCB / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricName": "tma_info_serialization_tpause_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricName": "tma_info_system_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "FP_FLOPS_RETIRED.ALL / (duration_time * 1e9)",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_system_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "cpu@CPU_CLK_UNHALTED.CORE_P@k / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_kernel_utilization"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are FPDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.FPDIV / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_uop_mix_fpdiv_uop_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are IDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.IDIV / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_uop_mix_idiv_uop_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are microcode ops",
+ "MetricExpr": "100 * UOPS_RETIRED.MS / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_uop_mix_microcode_uop_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are x87 uops",
+ "MetricExpr": "100 * UOPS_RETIRED.X87 / TOPDOWN_RETIRING.ALL_P",
+ "MetricName": "tma_info_uop_mix_x87_uop_ratio"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ITLB_MISS / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops",
+ "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops",
+ "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation",
+ "MetricExpr": "tma_backend_bound - tma_core_bound",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that result in retirement slots",
+ "MetricExpr": "TOPDOWN_RETIRING.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
+ "MetricgroupNoGroup": "TopdownL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)",
+ "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / (6 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore operating frequency in GHz",
+ "MetricExpr": "UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_CLOCKTICKS) * #num_packages) / 1e9 / duration_time",
+ "MetricName": "uncore_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
+ "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+ "MetricName": "upi_data_transmit_bw",
+ "ScaleUnit": "1MB/s"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
index a3aafbbc3484..f37107373e3b 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Clockticks for CMS units attached to CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_CHACMS_CLOCKTICKS",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of CHA clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_CHA_CLOCKTICKS",
"PerPkg": "1",
@@ -18,38 +20,47 @@
},
{
"BriefDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR or IRQ (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_ANY",
"PerPkg": "1",
@@ -59,6 +70,7 @@
},
{
"BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in IRQ (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_IRQ",
"PerPkg": "1",
@@ -67,6 +79,7 @@
},
{
"BriefDescription": "Distress signal assertion for dynamic prefetch throttle (DPT). Threshold for distress signal assertion reached in TOR (immediate cause for triggering).",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_TOR",
"PerPkg": "1",
@@ -75,40 +88,50 @@
},
{
"BriefDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5b",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups: All Requests to Remotely Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All transactions from Remote Agents",
"UMask": "0x17e0ff",
@@ -116,8 +139,10 @@
},
{
"BriefDescription": "Cache Lookups: CRd Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x1bd0ff",
@@ -125,8 +150,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests and Read Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1bc1ff",
@@ -134,8 +161,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Reads",
"UMask": "0x1fc1ff",
@@ -143,8 +172,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches",
"UMask": "0x841ff",
@@ -152,8 +183,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests, Read Prefetches, and Snoops which miss the Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Misses",
"UMask": "0x1fc101",
@@ -161,8 +194,10 @@
},
{
"BriefDescription": "Cache Lookups: All Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally",
"UMask": "0xbdfff",
@@ -170,8 +205,10 @@
},
{
"BriefDescription": "Cache Lookups: Code Read Requests and Code Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x19d0ff",
@@ -179,8 +216,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests and Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x19c1ff",
@@ -188,8 +227,10 @@
},
{
"BriefDescription": "Cache Lookups: Code Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x1850ff",
@@ -197,8 +238,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1841ff",
@@ -206,8 +249,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests",
"UMask": "0x1848ff",
@@ -215,8 +260,10 @@
},
{
"BriefDescription": "Cache Lookups: LLC Prefetch Requests to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x189dff",
@@ -224,8 +271,10 @@
},
{
"BriefDescription": "Cache Lookups: All Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x199dff",
@@ -233,8 +282,10 @@
},
{
"BriefDescription": "Cache Lookups: Code Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x1910ff",
@@ -242,8 +293,10 @@
},
{
"BriefDescription": "Cache Lookups: Read Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1981ff",
@@ -251,8 +304,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests",
"UMask": "0x1908ff",
@@ -260,8 +315,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests",
"UMask": "0x19c8ff",
@@ -269,8 +326,10 @@
},
{
"BriefDescription": "Cache Lookups: All Requests to Remotely Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
"UMask": "0x15dfff",
@@ -278,8 +337,10 @@
},
{
"BriefDescription": "Cache Lookups: Code Read/Prefetch Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_CODE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Requests",
"UMask": "0x1a10ff",
@@ -287,8 +348,10 @@
},
{
"BriefDescription": "Cache Lookups: Data Read/Prefetch Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
"UMask": "0x1a01ff",
@@ -296,8 +359,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Requests/Prefetches from a Remote Socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests",
"UMask": "0x1a08ff",
@@ -305,8 +370,10 @@
},
{
"BriefDescription": "Cache Lookups: Snoop Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed",
"UMask": "0x1c19ff",
@@ -314,8 +381,10 @@
},
{
"BriefDescription": "Cache Lookups: All RFO and RFO Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All RFOs - Demand and Prefetches",
"UMask": "0x1bc8ff",
@@ -323,8 +392,10 @@
},
{
"BriefDescription": "Cache Lookups: RFO Requests and RFO Prefetches to Locally Homed Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches",
"UMask": "0x9c8ff",
@@ -332,8 +403,10 @@
},
{
"BriefDescription": "Cache Lookups: Writes to Locally Homed Memory (includes writebacks from L1/L2)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Writes",
"UMask": "0x842ff",
@@ -341,8 +414,10 @@
},
{
"BriefDescription": "Cache Lookups: Writes to Remotely Homed Memory (includes writebacks from L1/L2)",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remote Writes",
"UMask": "0x17c2ff",
@@ -350,8 +425,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : All Lines Victimized",
"UMask": "0xf",
@@ -359,24 +436,30 @@
},
{
"BriefDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.IO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - All Lines",
"UMask": "0x200f",
@@ -384,8 +467,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in E State",
"UMask": "0x2002",
@@ -393,8 +478,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in F State",
"UMask": "0x2008",
@@ -402,8 +489,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in M State",
"UMask": "0x2001",
@@ -411,8 +500,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in S State",
"UMask": "0x2004",
@@ -420,8 +511,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote - All Lines",
"UMask": "0x800f",
@@ -429,8 +522,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote - Lines in E State",
"UMask": "0x8002",
@@ -438,8 +533,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote - Lines in M State",
"UMask": "0x8001",
@@ -447,8 +544,10 @@
},
{
"BriefDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Remote - Lines in S State",
"UMask": "0x8004",
@@ -456,8 +555,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in E state",
"UMask": "0x2",
@@ -465,8 +566,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in M state",
"UMask": "0x1",
@@ -474,8 +577,10 @@
},
{
"BriefDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in S State",
"UMask": "0x4",
@@ -483,6 +588,7 @@
},
{
"BriefDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -492,38 +598,47 @@
},
{
"BriefDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
"PerPkg": "1",
@@ -532,60 +647,75 @@
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.ALLOC_EXCLUSIVE",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.ALLOC_EXCLUSIVE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.ALLOC_SHARED",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.ALLOC_SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.DEALLOC_EVCTCLN",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.DEALLOC_EVCTCLN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.DIRBACKED_ONLY",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.DIRBACKED_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.HIT_EXCLUSIVE",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.HIT_EXCLUSIVE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.HIT_SHARED",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.HIT_SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.INCLUSIVE_ONLY",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.INCLUSIVE_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.MISS",
"PerPkg": "1",
@@ -594,35 +724,44 @@
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.UPDATE_EXCLUSIVE",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.UPDATE_EXCLUSIVE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.UPDATE_SHARED",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.UPDATE_SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.VICTIM_EXCLUSIVE",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.VICTIM_EXCLUSIVE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "UNC_CHA_REMOTE_SF.VICTIM_SHARED",
+ "Counter": "0,1,2,3",
"EventCode": "0x69",
"EventName": "UNC_CHA_REMOTE_SF.VICTIM_SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE",
"PerPkg": "1",
@@ -632,6 +771,7 @@
},
{
"BriefDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -640,6 +780,7 @@
},
{
"BriefDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -648,6 +789,7 @@
},
{
"BriefDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS",
"PerPkg": "1",
@@ -657,6 +799,7 @@
},
{
"BriefDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -665,6 +808,7 @@
},
{
"BriefDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -673,6 +817,7 @@
},
{
"BriefDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES",
"PerPkg": "1",
@@ -682,6 +827,7 @@
},
{
"BriefDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -690,6 +836,7 @@
},
{
"BriefDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -698,8 +845,10 @@
},
{
"BriefDescription": "All TOR Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All",
"UMask": "0xc001ffff",
@@ -707,152 +856,190 @@
},
{
"BriefDescription": "CLFlush transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c8c7fd20",
"Unit": "CHA"
},
{
"BriefDescription": "FsRdCur transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_FSRDCUR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c8effd20",
"Unit": "CHA"
},
{
"BriefDescription": "FsRdCurPtl transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_FSRDCURPTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c9effd20",
"Unit": "CHA"
},
{
"BriefDescription": "ItoM transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc47fd20",
"Unit": "CHA"
},
{
"BriefDescription": "ItoMWr transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_ITOMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc4ffd20",
"Unit": "CHA"
},
{
"BriefDescription": "MemPushWr transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_MEMPUSHWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc6ffd20",
"Unit": "CHA"
},
{
"BriefDescription": "WCiL transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c86ffd20",
"Unit": "CHA"
},
{
"BriefDescription": "WcilF transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c867fd20",
"Unit": "CHA"
},
{
"BriefDescription": "WiL transactions from a CXL device which hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_HIT_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c87ffd20",
"Unit": "CHA"
},
{
"BriefDescription": "CLFlush transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c8c7fe20",
"Unit": "CHA"
},
{
"BriefDescription": "FsRdCur transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_FSRDCUR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c8effe20",
"Unit": "CHA"
},
{
"BriefDescription": "FsRdCurPtl transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_FSRDCURPTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c9effe20",
"Unit": "CHA"
},
{
"BriefDescription": "ItoM transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc47fe20",
"Unit": "CHA"
},
{
"BriefDescription": "ItoMWr transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_ITOMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc4ffe20",
"Unit": "CHA"
},
{
"BriefDescription": "MemPushWr transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_MEMPUSHWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc6ffe20",
"Unit": "CHA"
},
{
"BriefDescription": "WCiL transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c86ffe20",
"Unit": "CHA"
},
{
"BriefDescription": "WcilF transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c867fe20",
"Unit": "CHA"
},
{
"BriefDescription": "WiL transactions from a CXL device which miss the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.CXL_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c87ffe20",
"Unit": "CHA"
},
{
"BriefDescription": "All locally initiated requests from IA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from iA Cores",
"UMask": "0xc001ff01",
@@ -860,6 +1047,7 @@
},
{
"BriefDescription": "CLFlush events that are initiated from the Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
"PerPkg": "1",
@@ -869,6 +1057,7 @@
},
{
"BriefDescription": "CLFlushOpt events that are initiated from the Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
"PerPkg": "1",
@@ -878,6 +1067,7 @@
},
{
"BriefDescription": "Code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
"PerPkg": "1",
@@ -887,6 +1077,7 @@
},
{
"BriefDescription": "Code read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
"PerPkg": "1",
@@ -896,6 +1087,7 @@
},
{
"BriefDescription": "Data read opt from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
"PerPkg": "1",
@@ -905,6 +1097,7 @@
},
{
"BriefDescription": "Data read opt prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
"PerPkg": "1",
@@ -914,8 +1107,10 @@
},
{
"BriefDescription": "All locally initiated requests from IA Cores which hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
"UMask": "0xc001fd01",
@@ -923,6 +1118,7 @@
},
{
"BriefDescription": "Code read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
"PerPkg": "1",
@@ -932,6 +1128,7 @@
},
{
"BriefDescription": "Code read prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -941,6 +1138,7 @@
},
{
"BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC",
"PerPkg": "1",
@@ -949,6 +1147,7 @@
},
{
"BriefDescription": "Data read opt from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
"PerPkg": "1",
@@ -958,6 +1157,7 @@
},
{
"BriefDescription": "Data read opt prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
"PerPkg": "1",
@@ -967,6 +1167,7 @@
},
{
"BriefDescription": "ItoM requests from local IA cores that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
"PerPkg": "1",
@@ -976,6 +1177,7 @@
},
{
"BriefDescription": "Last level cache prefetch code read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
"PerPkg": "1",
@@ -985,6 +1187,7 @@
},
{
"BriefDescription": "Last level cache prefetch data read from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
"PerPkg": "1",
@@ -994,6 +1197,7 @@
},
{
"BriefDescription": "Last level cache prefetch read for ownership from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -1003,6 +1207,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
"PerPkg": "1",
@@ -1012,6 +1217,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -1021,6 +1227,7 @@
},
{
"BriefDescription": "ItoM events that are initiated from the Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
"PerPkg": "1",
@@ -1030,6 +1237,7 @@
},
{
"BriefDescription": "ItoMCacheNear requests from local IA cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1039,6 +1247,7 @@
},
{
"BriefDescription": "Last level cache prefetch code read from local IA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
"PerPkg": "1",
@@ -1048,6 +1257,7 @@
},
{
"BriefDescription": "Last level cache prefetch data read from local IA.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -1057,6 +1267,7 @@
},
{
"BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -1066,6 +1277,7 @@
},
{
"BriefDescription": "All locally initiated requests from IA Cores which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
"PerPkg": "1",
@@ -1075,6 +1287,7 @@
},
{
"BriefDescription": "Code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
"PerPkg": "1",
@@ -1084,6 +1297,7 @@
},
{
"BriefDescription": "CRDs from local IA cores to locally homed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
"PerPkg": "1",
@@ -1093,6 +1307,7 @@
},
{
"BriefDescription": "Code read prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
"PerPkg": "1",
@@ -1102,6 +1317,7 @@
},
{
"BriefDescription": "CRD Prefetches from local IA cores to locally homed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
"PerPkg": "1",
@@ -1111,6 +1327,7 @@
},
{
"BriefDescription": "CRD Prefetches from local IA cores to remotely homed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
"PerPkg": "1",
@@ -1120,6 +1337,7 @@
},
{
"BriefDescription": "CRDs from local IA cores to remotely homed memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
"PerPkg": "1",
@@ -1129,6 +1347,7 @@
},
{
"BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC",
"PerPkg": "1",
@@ -1137,6 +1356,7 @@
},
{
"BriefDescription": "DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC",
"PerPkg": "1",
@@ -1146,6 +1366,7 @@
},
{
"BriefDescription": "Data read opt from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
"PerPkg": "1",
@@ -1155,6 +1376,7 @@
},
{
"BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd_Opt, and which target local memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_LOCAL",
"PerPkg": "1",
@@ -1164,6 +1386,7 @@
},
{
"BriefDescription": "Data read opt prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
"PerPkg": "1",
@@ -1173,6 +1396,7 @@
},
{
"BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF_OPT, and target local memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_LOCAL",
"PerPkg": "1",
@@ -1182,6 +1406,7 @@
},
{
"BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF_OPT, and target remote memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_REMOTE",
"PerPkg": "1",
@@ -1191,6 +1416,7 @@
},
{
"BriefDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd_Opt, and target remote memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_REMOTE",
"PerPkg": "1",
@@ -1200,6 +1426,7 @@
},
{
"BriefDescription": "L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC",
"PerPkg": "1",
@@ -1208,6 +1435,7 @@
},
{
"BriefDescription": "ItoM requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
"PerPkg": "1",
@@ -1217,6 +1445,7 @@
},
{
"BriefDescription": "Last level cache prefetch code read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
"PerPkg": "1",
@@ -1226,6 +1455,7 @@
},
{
"BriefDescription": "Last level cache prefetch data read from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -1235,6 +1465,7 @@
},
{
"BriefDescription": "LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC",
"PerPkg": "1",
@@ -1243,6 +1474,7 @@
},
{
"BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -1252,6 +1484,7 @@
},
{
"BriefDescription": "L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC",
"PerPkg": "1",
@@ -1260,6 +1493,7 @@
},
{
"BriefDescription": "WCILF requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
"PerPkg": "1",
@@ -1269,8 +1503,10 @@
},
{
"BriefDescription": "WCILF requests from local IA cores to locally homed PMM addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
"UMask": "0xc8668a01",
@@ -1278,6 +1514,7 @@
},
{
"BriefDescription": "WCIL requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
"PerPkg": "1",
@@ -1287,8 +1524,10 @@
},
{
"BriefDescription": "WCIL requests from local IA cores to locally homed PMM addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
"UMask": "0xc86e8a01",
@@ -1296,6 +1535,7 @@
},
{
"BriefDescription": "WCILF requests from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
"PerPkg": "1",
@@ -1305,8 +1545,10 @@
},
{
"BriefDescription": "WCILF requests from local IA cores to remotely homed PMM addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
"UMask": "0xc8670a01",
@@ -1314,6 +1556,7 @@
},
{
"BriefDescription": "WCIL requests from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
"PerPkg": "1",
@@ -1323,8 +1566,10 @@
},
{
"BriefDescription": "WCIL requests from local IA cores to remotely homed PMM addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
"UMask": "0xc86f0a01",
@@ -1332,6 +1577,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
"PerPkg": "1",
@@ -1341,6 +1587,7 @@
},
{
"BriefDescription": "RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC",
"PerPkg": "1",
@@ -1349,6 +1596,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -1358,6 +1606,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -1367,6 +1616,7 @@
},
{
"BriefDescription": "LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC",
"PerPkg": "1",
@@ -1375,6 +1625,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -1384,6 +1635,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
"PerPkg": "1",
@@ -1393,6 +1645,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
"PerPkg": "1",
@@ -1402,6 +1655,7 @@
},
{
"BriefDescription": "UCRDF requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
"PerPkg": "1",
@@ -1411,6 +1665,7 @@
},
{
"BriefDescription": "WCIL requests from a local IA core that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
"PerPkg": "1",
@@ -1420,6 +1675,7 @@
},
{
"BriefDescription": "WCILF requests from local IA core that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
"PerPkg": "1",
@@ -1429,6 +1685,7 @@
},
{
"BriefDescription": "WCILF requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
"PerPkg": "1",
@@ -1438,8 +1695,10 @@
},
{
"BriefDescription": "WCILF requests from local IA cores to PMM homed addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
"UMask": "0xc8678a01",
@@ -1447,6 +1706,7 @@
},
{
"BriefDescription": "WCIL requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
"PerPkg": "1",
@@ -1456,8 +1716,10 @@
},
{
"BriefDescription": "WCIL requests from a local IA core to PMM homed addresses that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
"UMask": "0xc86f8a01",
@@ -1465,6 +1727,7 @@
},
{
"BriefDescription": "WIL requests from local IA cores that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
"PerPkg": "1",
@@ -1474,6 +1737,7 @@
},
{
"BriefDescription": "Read for ownership from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
"PerPkg": "1",
@@ -1483,6 +1747,7 @@
},
{
"BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
"PerPkg": "1",
@@ -1492,6 +1757,7 @@
},
{
"BriefDescription": "SpecItoM events that are initiated from the Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
"PerPkg": "1",
@@ -1501,6 +1767,7 @@
},
{
"BriefDescription": "WbEFtoEs issued by iA Cores. (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
"PerPkg": "1",
@@ -1510,6 +1777,7 @@
},
{
"BriefDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
"PerPkg": "1",
@@ -1519,6 +1787,7 @@
},
{
"BriefDescription": "WbMtoEs issued by iA Cores . (Modified Write Backs)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
"PerPkg": "1",
@@ -1528,6 +1797,7 @@
},
{
"BriefDescription": "WbMtoI requests from local IA cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
"PerPkg": "1",
@@ -1537,6 +1807,7 @@
},
{
"BriefDescription": "WbStoIs issued by iA Cores . (Non Modified Write Backs)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
"PerPkg": "1",
@@ -1546,6 +1817,7 @@
},
{
"BriefDescription": "WCIL requests from a local IA core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
"PerPkg": "1",
@@ -1555,6 +1827,7 @@
},
{
"BriefDescription": "WCILF requests from local IA core",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
"PerPkg": "1",
@@ -1564,8 +1837,10 @@
},
{
"BriefDescription": "All TOR inserts from local IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from IO Devices",
"UMask": "0xc001ff04",
@@ -1573,6 +1848,7 @@
},
{
"BriefDescription": "CLFlush requests from IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
"PerPkg": "1",
@@ -1582,8 +1858,10 @@
},
{
"BriefDescription": "All TOR inserts from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
"UMask": "0xc001fd04",
@@ -1591,6 +1869,7 @@
},
{
"BriefDescription": "ItoMs from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
"PerPkg": "1",
@@ -1600,6 +1879,7 @@
},
{
"BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1609,6 +1889,7 @@
},
{
"BriefDescription": "PCIRDCURs issued by IO devices which hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -1618,6 +1899,7 @@
},
{
"BriefDescription": "RFOs from local IO devices which hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
"PerPkg": "1",
@@ -1627,6 +1909,7 @@
},
{
"BriefDescription": "All TOR ItoM inserts from local IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
"PerPkg": "1",
@@ -1636,6 +1919,7 @@
},
{
"BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
"PerPkg": "1",
@@ -1644,70 +1928,79 @@
"Unit": "CHA"
},
{
- "BriefDescription": "All TOR inserts from local IO devices which miss the cache",
+ "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
- "UMask": "0xc001fe04",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that address memory on the local socket",
+ "UMask": "0xcd42ff04",
"Unit": "CHA"
},
{
- "BriefDescription": "All TOR ItoM inserts from local IO devices which miss the cache",
+ "BriefDescription": "ItoMCacheNear (partial write) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
- "UMask": "0xcc43fe04",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that address memory on a remote socket",
+ "UMask": "0xcd437f04",
"Unit": "CHA"
},
{
- "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
- "UMask": "0xcd43fe04",
+ "PublicDescription": "TOR Inserts : ItoM, indicating a write request, from IO Devices that address memory on the local socket",
+ "UMask": "0xcc42ff04",
"Unit": "CHA"
},
{
- "BriefDescription": "ItoMCacheNear transactions from an IO device on the local socket that miss the cache",
+ "BriefDescription": "ItoM (write) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR_LOCAL",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
- "UMask": "0xcd42fe04",
+ "PublicDescription": "TOR Inserts : ItoM, indicating a write request, from IO Devices that address memory on a remote socket",
+ "UMask": "0xcc437f04",
"Unit": "CHA"
},
{
- "BriefDescription": "ItoMCacheNear transactions from an IO device on a remote socket that miss the cache",
+ "BriefDescription": "All TOR inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR_REMOTE",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
- "UMask": "0xcd437e04",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "UMask": "0xc001fe04",
"Unit": "CHA"
},
{
- "BriefDescription": "ItoM transactions from an IO device on the local socket that miss the cache",
+ "BriefDescription": "All TOR ItoM inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM_LOCAL",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
- "UMask": "0xcc42fe04",
+ "UMask": "0xcc43fe04",
"Unit": "CHA"
},
{
- "BriefDescription": "ItoM transactions from an IO device on a remote socket that miss the cache",
+ "BriefDescription": "ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM_REMOTE",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
- "UMask": "0xcc437e04",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "UMask": "0xcd43fe04",
"Unit": "CHA"
},
{
"BriefDescription": "PCIRDCURs issued by IO devices which miss the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -1717,6 +2010,7 @@
},
{
"BriefDescription": "All TOR RFO inserts from local IO devices which miss the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
"PerPkg": "1",
@@ -1726,6 +2020,7 @@
},
{
"BriefDescription": "PCIRDCURs issued by IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
"PerPkg": "1",
@@ -1734,7 +2029,28 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on the local socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that addresses memory on the local socket",
+ "UMask": "0xc8f2ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIRDCUR (read) transactions from an IO device that addresses memory on a remote socket",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that addresses memory on a remote socket",
+ "UMask": "0xc8f37f04",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "RFOs from local IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
"PerPkg": "1",
@@ -1744,6 +2060,7 @@
},
{
"BriefDescription": "WBMtoI requests from IO devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
"PerPkg": "1",
@@ -1753,6 +2070,7 @@
},
{
"BriefDescription": "TOR Inserts for SF or LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LLC_OR_SF_EVICTIONS",
"PerPkg": "1",
@@ -1762,8 +2080,10 @@
},
{
"BriefDescription": "All locally initiated requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA and IO",
"UMask": "0xc000ff05",
@@ -1771,8 +2091,10 @@
},
{
"BriefDescription": "All from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA",
"UMask": "0xc000ff01",
@@ -1780,8 +2102,10 @@
},
{
"BriefDescription": "All from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local IO",
"UMask": "0xc000ff04",
@@ -1789,8 +2113,10 @@
},
{
"BriefDescription": "All remote requests (e.g. snoops, writebacks) that came from remote sockets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All Remote Requests",
"UMask": "0xc001ffc8",
@@ -1798,8 +2124,10 @@
},
{
"BriefDescription": "All snoops to this LLC that came from remote sockets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All Snoops from Remote",
"UMask": "0xc001ff08",
@@ -1807,8 +2135,10 @@
},
{
"BriefDescription": "Occupancy for all TOR entries",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All",
"UMask": "0xc001ffff",
@@ -1816,152 +2146,190 @@
},
{
"BriefDescription": "TOR Occupancy for CLFlush transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c8c7fd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for FsRdCur transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_FSRDCUR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c8effd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for FsRdCurPtl transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_FSRDCURPTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c9effd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for ItoM transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc47fd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for ItoMWr transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_ITOMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc4ffd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for MemPushWr transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_MEMPUSHWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc6ffd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for WCiL transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c86ffd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for WcilF transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c867fd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for WiL transactions from a CXL device which hit in the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_HIT_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c87ffd20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for CLFlush transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c8c7fe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for FsRdCur transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_FSRDCUR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c8effe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for FsRdCurPtl transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_FSRDCURPTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c9effe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for ItoM transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc47fe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for ItoMWr transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_ITOMWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc4ffe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for MemPushWr transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_MEMPUSHWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78cc6ffe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for WCiL transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c86ffe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for WcilF transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c867fe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for WiL transactions from a CXL device which miss the L3.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.CXL_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x78c87ffe20",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from iA Cores",
"UMask": "0xc001ff01",
@@ -1969,6 +2337,7 @@
},
{
"BriefDescription": "TOR Occupancy for CLFlush events that are initiated from the Core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
"PerPkg": "1",
@@ -1978,6 +2347,7 @@
},
{
"BriefDescription": "TOR Occupancy for CLFlushOpt events that are initiated from the Core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
"PerPkg": "1",
@@ -1987,6 +2357,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
"PerPkg": "1",
@@ -1996,6 +2367,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
"PerPkg": "1",
@@ -2005,8 +2377,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
"UMask": "0xc827ff01",
@@ -2014,8 +2388,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
"UMask": "0xc8a7ff01",
@@ -2023,8 +2399,10 @@
},
{
"BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
"UMask": "0xc001fd01",
@@ -2032,6 +2410,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
"PerPkg": "1",
@@ -2041,6 +2420,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read prefetch from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -2050,6 +2430,7 @@
},
{
"BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC",
"PerPkg": "1",
@@ -2058,8 +2439,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
"UMask": "0xc827fd01",
@@ -2067,8 +2450,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
"UMask": "0xc8a7fd01",
@@ -2076,6 +2461,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
"PerPkg": "1",
@@ -2085,6 +2471,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
"PerPkg": "1",
@@ -2094,6 +2481,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
"PerPkg": "1",
@@ -2103,6 +2491,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
"PerPkg": "1",
@@ -2112,6 +2501,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
"PerPkg": "1",
@@ -2121,6 +2511,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -2130,6 +2521,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoM events that are initiated from the Core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
"PerPkg": "1",
@@ -2139,6 +2531,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNear requests from local IA cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
"PerPkg": "1",
@@ -2148,6 +2541,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
"PerPkg": "1",
@@ -2157,6 +2551,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
"PerPkg": "1",
@@ -2166,6 +2561,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
"PerPkg": "1",
@@ -2175,8 +2571,10 @@
},
{
"BriefDescription": "TOR Occupancy for All locally initiated requests from IA Cores which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
"UMask": "0xc001fe01",
@@ -2184,6 +2582,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
"PerPkg": "1",
@@ -2193,6 +2592,7 @@
},
{
"BriefDescription": "TOR Occupancy for CRDs from local IA cores to locally homed memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
"PerPkg": "1",
@@ -2202,6 +2602,7 @@
},
{
"BriefDescription": "TOR Occupancy for Code read prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
"PerPkg": "1",
@@ -2211,6 +2612,7 @@
},
{
"BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to locally homed memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
"PerPkg": "1",
@@ -2220,6 +2622,7 @@
},
{
"BriefDescription": "TOR Occupancy for CRD Prefetches from local IA cores to remotely homed memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
"PerPkg": "1",
@@ -2229,6 +2632,7 @@
},
{
"BriefDescription": "TOR Occupancy for CRDs from local IA cores to remotely homed memory",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
"PerPkg": "1",
@@ -2238,6 +2642,7 @@
},
{
"BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC",
"PerPkg": "1",
@@ -2246,6 +2651,7 @@
},
{
"BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC",
"PerPkg": "1",
@@ -2254,8 +2660,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
"UMask": "0xc827fe01",
@@ -2263,8 +2671,10 @@
},
{
"BriefDescription": "TOR Occupancy for Data read opt prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
"UMask": "0xc8a7fe01",
@@ -2272,6 +2682,7 @@
},
{
"BriefDescription": "TOR Occupancy for L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC",
"PerPkg": "1",
@@ -2280,6 +2691,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoM requests from local IA cores that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
"PerPkg": "1",
@@ -2289,6 +2701,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch code read from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
"PerPkg": "1",
@@ -2298,6 +2711,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch data read from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
"PerPkg": "1",
@@ -2307,6 +2721,7 @@
},
{
"BriefDescription": "TOR Occupancy for LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC",
"PerPkg": "1",
@@ -2315,6 +2730,7 @@
},
{
"BriefDescription": "TOR Occupancy for Last level cache prefetch read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
"PerPkg": "1",
@@ -2324,6 +2740,7 @@
},
{
"BriefDescription": "TOR Occupancy for L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC",
"PerPkg": "1",
@@ -2332,6 +2749,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
"PerPkg": "1",
@@ -2341,8 +2759,10 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to locally homed PMM addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
"UMask": "0xc8668a01",
@@ -2350,6 +2770,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed DDR addresses that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
"PerPkg": "1",
@@ -2359,8 +2780,10 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to locally homed PMM addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
"UMask": "0xc86e8a01",
@@ -2368,6 +2791,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
"PerPkg": "1",
@@ -2377,8 +2801,10 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to remotely homed PMM addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
"UMask": "0xc8670a01",
@@ -2386,6 +2812,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to remotely homed DDR addresses that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
"PerPkg": "1",
@@ -2395,8 +2822,10 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to remotely homed PMM addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
"UMask": "0xc86f0a01",
@@ -2404,6 +2833,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
"PerPkg": "1",
@@ -2413,6 +2843,7 @@
},
{
"BriefDescription": "TOR Occupancy for RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC",
"PerPkg": "1",
@@ -2421,6 +2852,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
"PerPkg": "1",
@@ -2430,6 +2862,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -2439,6 +2872,7 @@
},
{
"BriefDescription": "TOR Occupancy for LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC",
"PerPkg": "1",
@@ -2447,6 +2881,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
"PerPkg": "1",
@@ -2456,6 +2891,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
"PerPkg": "1",
@@ -2465,6 +2901,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
"PerPkg": "1",
@@ -2474,6 +2911,7 @@
},
{
"BriefDescription": "TOR Occupancy for UCRDF requests from local IA cores that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
"PerPkg": "1",
@@ -2483,6 +2921,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from a local IA core that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
"PerPkg": "1",
@@ -2492,6 +2931,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA core that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
"PerPkg": "1",
@@ -2501,6 +2941,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
"PerPkg": "1",
@@ -2510,8 +2951,10 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA cores to PMM homed addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
"UMask": "0xc8678a01",
@@ -2519,6 +2962,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from local IA cores to DDR homed addresses which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
"PerPkg": "1",
@@ -2528,8 +2972,10 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from a local IA core to PMM homed addresses that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
"UMask": "0xc86f8a01",
@@ -2537,6 +2983,7 @@
},
{
"BriefDescription": "TOR Occupancy for WIL requests from local IA cores that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
"PerPkg": "1",
@@ -2546,6 +2993,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
"PerPkg": "1",
@@ -2555,6 +3003,7 @@
},
{
"BriefDescription": "TOR Occupancy for Read for ownership prefetch from local IA that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
"PerPkg": "1",
@@ -2564,6 +3013,7 @@
},
{
"BriefDescription": "TOR Occupancy for SpecItoM events that are initiated from the Core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
"PerPkg": "1",
@@ -2573,6 +3023,7 @@
},
{
"BriefDescription": "TOR Occupancy for WbMtoI requests from local IA cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
"PerPkg": "1",
@@ -2582,6 +3033,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCIL requests from a local IA core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
"PerPkg": "1",
@@ -2591,6 +3043,7 @@
},
{
"BriefDescription": "TOR Occupancy for WCILF requests from local IA core",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
"PerPkg": "1",
@@ -2600,8 +3053,10 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from IO Devices",
"UMask": "0xc001ff04",
@@ -2609,6 +3064,7 @@
},
{
"BriefDescription": "TOR Occupancy for CLFlush requests from IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
"PerPkg": "1",
@@ -2618,8 +3074,10 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
"UMask": "0xc001fd04",
@@ -2627,6 +3085,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMs from local IO devices which hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
"PerPkg": "1",
@@ -2636,6 +3095,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -2645,6 +3105,7 @@
},
{
"BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -2654,6 +3115,7 @@
},
{
"BriefDescription": "TOR Occupancy for RFOs from local IO devices which hit the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
"PerPkg": "1",
@@ -2663,6 +3125,7 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
"PerPkg": "1",
@@ -2672,6 +3135,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
"PerPkg": "1",
@@ -2681,8 +3145,10 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR inserts from local IO devices which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
"UMask": "0xc001fe04",
@@ -2690,6 +3156,7 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR ItoM inserts from local IO devices which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
"PerPkg": "1",
@@ -2699,6 +3166,7 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -2708,8 +3176,10 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNear transactions from an IO device on the local socket that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
"UMask": "0xcd42fe04",
@@ -2717,8 +3187,10 @@
},
{
"BriefDescription": "TOR Occupancy for ItoMCacheNear transactions from an IO device on a remote socket that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
"UMask": "0xcd437e04",
@@ -2726,8 +3198,10 @@
},
{
"BriefDescription": "TOR Occupancy for ItoM transactions from an IO device on the local socket that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
"UMask": "0xcc42fe04",
@@ -2735,8 +3209,10 @@
},
{
"BriefDescription": "TOR Occupancy for ItoM transactions from an IO device on a remote socket that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
"UMask": "0xcc437e04",
@@ -2744,6 +3220,7 @@
},
{
"BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices which miss the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -2753,8 +3230,10 @@
},
{
"BriefDescription": "TOR Occupancy for PCIRDCUR transactions from an IO device on the local socket that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
"UMask": "0xc8f2fe04",
@@ -2762,8 +3241,10 @@
},
{
"BriefDescription": "TOR Occupancy for PCIRDCUR transactions from an IO device on a remote socket that miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
"UMask": "0xc8f37e04",
@@ -2771,6 +3252,7 @@
},
{
"BriefDescription": "TOR Occupancy for All TOR RFO inserts from local IO devices which miss the cache",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
"PerPkg": "1",
@@ -2780,6 +3262,7 @@
},
{
"BriefDescription": "TOR Occupancy for PCIRDCURs issued by IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
"PerPkg": "1",
@@ -2789,6 +3272,7 @@
},
{
"BriefDescription": "TOR Occupancy for RFOs from local IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
"PerPkg": "1",
@@ -2798,6 +3282,7 @@
},
{
"BriefDescription": "TOR Occupancy for WBMtoI requests from IO devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
"PerPkg": "1",
@@ -2807,8 +3292,10 @@
},
{
"BriefDescription": "TOR Occupancy for All locally initiated requests",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA and IO",
"UMask": "0xc000ff05",
@@ -2816,8 +3303,10 @@
},
{
"BriefDescription": "TOR Occupancy for All from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA",
"UMask": "0xc000ff01",
@@ -2825,8 +3314,10 @@
},
{
"BriefDescription": "TOR Occupancy for All from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local IO",
"UMask": "0xc000ff04",
@@ -2834,8 +3325,10 @@
},
{
"BriefDescription": "TOR Occupancy for All remote requests (e.g. snoops, writebacks) that came from remote sockets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All Remote Requests",
"UMask": "0xc001ffc8",
@@ -2843,8 +3336,10 @@
},
{
"BriefDescription": "TOR Occupancy for All snoops to this LLC that came from remote sockets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All Snoops from Remote",
"UMask": "0xc001ff08",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cxl.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cxl.json
index dc676c7aa37f..383a5ba5a697 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cxl.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cxl.json
@@ -1,10 +1,31 @@
[
{
"BriefDescription": "B2CXL Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_B2CXL_CLOCKTICKS",
"PerPkg": "1",
"PortMask": "0x000",
"Unit": "B2CXL"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "Counter": "4,5,6,7",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_DATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to M2S Data AGF",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_DATA",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLDP"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json
index 6932b2fea3a5..80440edac431 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Clockticks of the mesh to memory (B2CMI)",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_B2CMI_CLOCKTICKS",
"PerPkg": "1",
@@ -8,14 +9,17 @@
},
{
"BriefDescription": "Counts the number of time D2C was not honoured by egress due to directory state constraints",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_B2CMI_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the number of times B2CMI egress did D2C (direct to core)",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_B2CMI_DIRECT2CORE_TAKEN",
"PerPkg": "1",
@@ -24,14 +28,17 @@
},
{
"BriefDescription": "Counts the number of times D2C wasn't honoured even though the incoming request had d2c set for non cisgress txn",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_B2CMI_DIRECT2CORE_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the number of d2k wasn't done due to credit constraints",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_CREDITS",
"PerPkg": "1",
@@ -40,6 +47,7 @@
},
{
"BriefDescription": "Direct to UPI Transactions - Ignored due to lack of credits : All : Counts the number of d2k wasn't done due to credit constraints",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_CREDITS.EGRESS",
"PerPkg": "1",
@@ -48,6 +56,7 @@
},
{
"BriefDescription": "Counts the number of time D2K was not honoured by egress due to directory state constraints",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
"PerPkg": "1",
@@ -56,6 +65,7 @@
},
{
"BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U : Counts the number of time D2K was not honoured by egress due to directory state constraints",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_B2CMI_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
"PerPkg": "1",
@@ -64,6 +74,7 @@
},
{
"BriefDescription": "Counts the number of times egress did D2K (Direct to KTI)",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_B2CMI_DIRECT2UPI_TAKEN",
"PerPkg": "1",
@@ -72,6 +83,7 @@
},
{
"BriefDescription": "Counts the number of times D2K wasn't honoured even though the incoming request had d2k set for non cisgress txn",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_B2CMI_DIRECT2UPI_TXN_OVERRIDE",
"PerPkg": "1",
@@ -80,70 +92,87 @@
},
{
"BriefDescription": "Directory Hit Clean",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x38",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_B2CMI_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Hit Dirty (modified)",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_B2CMI_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with any directory to non persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -152,6 +181,7 @@
},
{
"BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory A to non persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -160,6 +190,7 @@
},
{
"BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory I to non persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -168,6 +199,7 @@
},
{
"BriefDescription": "Counts the number of 1lm or 2lm hit read data returns to egress with directory S to non persistent memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_B2CMI_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -177,70 +209,87 @@
},
{
"BriefDescription": "Directory Miss Clean",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x38",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_B2CMI_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Miss Dirty (modified)",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_B2CMI_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "B2CMI"
},
{
"BriefDescription": "Any A2I Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.A2I",
"PerPkg": "1",
@@ -249,6 +298,7 @@
},
{
"BriefDescription": "Any A2S Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.A2S",
"PerPkg": "1",
@@ -257,6 +307,7 @@
},
{
"BriefDescription": "Counts cisgress directory updates",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -265,38 +316,47 @@
},
{
"BriefDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory (DRAM)",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x101",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory update in near memory to the A state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x114",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory update in near memory to the I state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x128",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory update in near memory to the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.HIT_X2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x142",
"Unit": "B2CMI"
},
{
"BriefDescription": "Any I2A Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.I2A",
"PerPkg": "1",
@@ -305,6 +365,7 @@
},
{
"BriefDescription": "Any I2S Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.I2S",
"PerPkg": "1",
@@ -313,70 +374,87 @@
},
{
"BriefDescription": "Directory update in far memory to the A state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x214",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory update in far memory to the I state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x228",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory update in far memory to the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.MISS_X2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x242",
"Unit": "B2CMI"
},
{
"BriefDescription": "Any S2A Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.S2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x310",
"Unit": "B2CMI"
},
{
"BriefDescription": "Any S2I Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.S2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x308",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory update to the A state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x314",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory update to the I state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x328",
"Unit": "B2CMI"
},
{
"BriefDescription": "Directory update to the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_B2CMI_DIRECTORY_UPDATE.X2S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x342",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts any read",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_B2CMI_IMC_READS.ALL",
"PerPkg": "1",
@@ -385,6 +463,7 @@
},
{
"BriefDescription": "Counts normal reads issue to CMI",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_B2CMI_IMC_READS.NORMAL",
"PerPkg": "1",
@@ -393,22 +472,27 @@
},
{
"BriefDescription": "Count reads to NM region",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x110",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts reads to 1lm non persistent memory regions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_B2CMI_IMC_READS.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "B2CMI"
},
{
"BriefDescription": "All Writes - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -417,6 +501,7 @@
},
{
"BriefDescription": "Full Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.FULL",
"PerPkg": "1",
@@ -425,20 +510,25 @@
},
{
"BriefDescription": "Non-Inclusive - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.NI",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -447,38 +537,47 @@
},
{
"BriefDescription": "DDR, acting as Cache - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x140",
"Unit": "B2CMI"
},
{
"BriefDescription": "DDR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_B2CMI_IMC_WRITES.TO_DDR_AS_MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x120",
"Unit": "B2CMI"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "B2CMI"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_B2CMI_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_B2CMI_PREFCAM_INSERTS.UPI_ALLCH",
"PerPkg": "1",
@@ -487,6 +586,7 @@
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT -All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_B2CMI_PREFCAM_INSERTS.XPT_ALLCH",
"PerPkg": "1",
@@ -496,118 +596,147 @@
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_B2CMI_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm reads and WRNI which were a hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_B2CMI_TAG_HIT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm reads which were a hit clean",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_B2CMI_TAG_HIT.RD_CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm reads which were a hit dirty",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_B2CMI_TAG_HIT.RD_DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm WRNI which were a hit clean",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_B2CMI_TAG_HIT.WR_CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm WRNI which were a hit dirty",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_B2CMI_TAG_HIT.WR_DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm second way read miss for a WrNI",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_B2CMI_TAG_MISS.CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm second way read miss for a WrNI",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_B2CMI_TAG_MISS.DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm second way read miss for a Rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_B2CMI_TAG_MISS.RD_2WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm reads which were a miss and the cache line is unmodified",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_B2CMI_TAG_MISS.RD_CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm reads which were a miss and the cache line is modified",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_B2CMI_TAG_MISS.RD_DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm second way read miss for a WrNI",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_B2CMI_TAG_MISS.WR_2WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm WRNI which were a miss and the cache line is unmodified",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_B2CMI_TAG_MISS.WR_CLEAN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "B2CMI"
},
{
"BriefDescription": "Counts the 2lm WRNI which were a miss and the cache line is modified",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_B2CMI_TAG_MISS.WR_DIRTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "B2CMI"
},
{
"BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_B2CMI_TRACKER_INSERTS.CH0",
"PerPkg": "1",
@@ -616,6 +745,7 @@
},
{
"BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_B2CMI_TRACKER_OCCUPANCY.CH0",
"PerPkg": "1",
@@ -624,14 +754,17 @@
},
{
"BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_B2CMI_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "B2CMI"
},
{
"BriefDescription": "UNC_B2HOT_CLOCKTICKS",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_B2HOT_CLOCKTICKS",
"PerPkg": "1",
@@ -640,6 +773,7 @@
},
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_B2UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -647,14 +781,17 @@
},
{
"BriefDescription": "Total Write Cache Occupancy : Mem",
+ "Counter": "0,1,2,3",
"EventCode": "0x0F",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "IRP Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
@@ -662,6 +799,7 @@
},
{
"BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_I_FAF_INSERTS",
"PerPkg": "1",
@@ -669,21 +807,26 @@
},
{
"BriefDescription": "FAF occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_I_FAF_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.LOST_FWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -692,6 +835,7 @@
},
{
"BriefDescription": "MDF Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_MDF_CLOCKTICKS",
"PerPkg": "1",
@@ -699,6 +843,7 @@
},
{
"BriefDescription": "Number of UPI LL clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -707,45 +852,56 @@
},
{
"BriefDescription": "Cycles in L1 : Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10e",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10f",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
"PerPkg": "1",
@@ -754,78 +910,97 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1aa",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12a",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10a",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x109",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
"PerPkg": "1",
@@ -834,14 +1009,17 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10d",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : All Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -850,8 +1028,10 @@
},
{
"BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Valid Flits Received : Null FLITs received from any slot",
"UMask": "0x27",
@@ -859,38 +1039,47 @@
},
{
"BriefDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x47",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -899,222 +1088,277 @@
},
{
"BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10e",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10f",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x108",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1aa",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12a",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10c",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10a",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x109",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10d",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : All Data : Counts number of data flits across this UPI link.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -1123,6 +1367,7 @@
},
{
"BriefDescription": "All Null Flits",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -1132,14 +1377,17 @@
},
{
"BriefDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.IDLE",
"PerPkg": "1",
@@ -1148,22 +1396,27 @@
},
{
"BriefDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -1173,55 +1426,69 @@
},
{
"BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_UPI_TxL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
}
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json
index 9495cb0f68ea..cffb9d94b53d 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "IIO Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_IIO_CLOCKTICKS",
"PerPkg": "1",
@@ -9,8 +10,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -19,8 +22,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -29,8 +34,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -39,8 +46,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -49,8 +58,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -59,8 +70,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -69,8 +82,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -79,8 +94,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -89,8 +106,10 @@
},
{
"BriefDescription": "PCIE Completion Buffer Inserts. Counts once per 64 byte read issued from this PCIE device.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -99,8 +118,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -109,8 +130,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -119,8 +142,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -129,8 +154,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -139,8 +166,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -149,8 +178,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -159,8 +190,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -169,8 +202,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -179,8 +214,10 @@
},
{
"BriefDescription": "Count of allocations in the completion buffer",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -189,8 +226,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -199,8 +238,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -209,8 +250,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -219,8 +262,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -229,8 +274,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -239,8 +286,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -249,8 +298,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -259,8 +310,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -269,8 +322,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -279,8 +334,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -289,6 +346,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -299,6 +357,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -309,6 +368,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -319,6 +379,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -329,6 +390,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -339,6 +401,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -349,6 +412,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -359,6 +423,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -369,6 +434,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.ALL_PARTS",
"FCMask": "0x07",
@@ -379,6 +445,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.ALL_PARTS",
"FCMask": "0x07",
@@ -389,6 +456,7 @@
},
{
"BriefDescription": "Counts once for every 4 bytes read from this card to memory. This event does include reads to IO.",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS",
"FCMask": "0x07",
@@ -399,6 +467,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -409,6 +478,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -419,6 +489,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -429,6 +500,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -439,6 +511,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -449,6 +522,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -459,6 +533,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -469,6 +544,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -479,6 +555,7 @@
},
{
"BriefDescription": "Counts once for every 4 bytes written from this card to memory. This event does include writes to IO.",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS",
"FCMask": "0x07",
@@ -489,6 +566,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -499,6 +577,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -509,6 +588,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -519,6 +599,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -529,6 +610,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -539,6 +621,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -549,6 +632,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -559,6 +643,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -569,8 +654,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -579,8 +666,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -589,8 +678,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -599,8 +690,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -609,8 +702,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -619,8 +714,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -629,8 +726,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -639,8 +738,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -649,8 +750,10 @@
},
{
"BriefDescription": "Counts once for every 4 bytes written from this card to a peer device's IO space.",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.ALL_PARTS",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -659,8 +762,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -669,8 +774,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -679,8 +786,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -689,8 +798,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -699,8 +810,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -709,8 +822,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -719,8 +834,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -729,8 +846,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -739,8 +858,10 @@
},
{
"BriefDescription": "IOTLB Hits to a 1G Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10",
@@ -748,8 +869,10 @@
},
{
"BriefDescription": "IOTLB Hits to a 2M Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x8",
@@ -757,8 +880,10 @@
},
{
"BriefDescription": "IOTLB Hits to a 4K Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x4",
@@ -766,8 +891,10 @@
},
{
"BriefDescription": "IOTLB lookups all",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x2",
@@ -775,8 +902,10 @@
},
{
"BriefDescription": "Context cache hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x80",
@@ -784,8 +913,10 @@
},
{
"BriefDescription": "Context cache lookups",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x40",
@@ -793,8 +924,10 @@
},
{
"BriefDescription": "IOTLB lookups first",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x1",
@@ -802,8 +935,10 @@
},
{
"BriefDescription": "IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.MISSES",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20",
@@ -811,8 +946,10 @@
},
{
"BriefDescription": "IOMMU memory access (both low and high priority)",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0xc0",
@@ -820,8 +957,10 @@
},
{
"BriefDescription": "IOMMU high priority memory access",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES_HIGH",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x80",
@@ -829,8 +968,10 @@
},
{
"BriefDescription": "IOMMU low priority memory access",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES_LOW",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x40",
@@ -838,8 +979,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache Hit to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x4",
@@ -847,8 +990,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache Hit to a 256T page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10",
@@ -856,8 +1001,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x2",
@@ -865,8 +1012,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache Hit to a 512G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x8",
@@ -874,8 +1023,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_CACHE_FILLS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20",
@@ -883,8 +1034,10 @@
},
{
"BriefDescription": "Second Level Page Walk Cache lookup",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.SLPWC_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x1",
@@ -892,8 +1045,10 @@
},
{
"BriefDescription": "Cycles PWT full",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.CYC_PWT_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x2",
@@ -901,8 +1056,10 @@
},
{
"BriefDescription": "Interrupt Entry cache hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x80",
@@ -910,8 +1067,10 @@
},
{
"BriefDescription": "Interrupt Entry cache lookup",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x40",
@@ -919,8 +1078,10 @@
},
{
"BriefDescription": "Context Cache invalidation events",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_CTXT_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x8",
@@ -928,8 +1089,10 @@
},
{
"BriefDescription": "Interrupt Entry Cache invalidation events",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_INT_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x20",
@@ -937,8 +1100,10 @@
},
{
"BriefDescription": "IOTLB invalidation events",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_IOTLB",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x4",
@@ -946,8 +1111,10 @@
},
{
"BriefDescription": "PASID Cache invalidation events",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PASID_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"UMask": "0x10",
@@ -955,8 +1122,10 @@
},
{
"BriefDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing",
+ "Counter": "2,3",
"EventCode": "0xc5",
"EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -965,8 +1134,10 @@
},
{
"BriefDescription": "Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -975,8 +1146,10 @@
},
{
"BriefDescription": "Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -985,8 +1158,10 @@
},
{
"BriefDescription": "Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -995,8 +1170,10 @@
},
{
"BriefDescription": "Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1005,8 +1182,10 @@
},
{
"BriefDescription": "Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1015,8 +1194,10 @@
},
{
"BriefDescription": "Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1025,8 +1206,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1035,8 +1218,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1045,8 +1230,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1055,8 +1242,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1065,8 +1254,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1075,8 +1266,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1085,8 +1278,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1095,8 +1290,10 @@
},
{
"BriefDescription": "-",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x0FF",
@@ -1105,14 +1302,17 @@
},
{
"BriefDescription": "All 9 bits of Page Walk Tracker Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PortMask": "0x000",
"Unit": "IIO"
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.ALL_PARTS",
"FCMask": "0x07",
@@ -1123,6 +1323,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -1133,6 +1334,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -1143,6 +1345,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -1153,6 +1356,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -1163,6 +1367,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -1173,6 +1378,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -1183,6 +1389,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -1193,6 +1400,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -1203,6 +1411,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
"FCMask": "0x07",
@@ -1213,6 +1422,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -1223,6 +1433,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -1233,6 +1444,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -1243,6 +1455,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -1253,6 +1466,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -1263,6 +1477,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -1273,6 +1488,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -1283,6 +1499,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -1293,6 +1510,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.ALL_PARTS",
"FCMask": "0x07",
@@ -1303,6 +1521,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.ALL_PARTS",
"FCMask": "0x07",
@@ -1313,6 +1532,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -1323,6 +1543,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -1333,6 +1554,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -1343,6 +1565,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -1353,6 +1576,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -1363,6 +1587,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -1373,6 +1598,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -1383,6 +1609,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -1393,6 +1620,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -1403,6 +1631,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -1413,6 +1642,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -1423,6 +1653,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -1433,6 +1664,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -1443,6 +1675,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -1453,6 +1686,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -1463,6 +1697,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -1473,8 +1708,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -1483,8 +1720,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -1493,8 +1732,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -1503,8 +1744,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -1513,8 +1756,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -1523,8 +1768,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -1533,8 +1780,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -1543,8 +1792,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
@@ -1553,8 +1804,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x001",
@@ -1563,8 +1816,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x002",
@@ -1573,8 +1828,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x004",
@@ -1583,8 +1840,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x008",
@@ -1593,8 +1852,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x010",
@@ -1603,8 +1864,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x020",
@@ -1613,8 +1876,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x040",
@@ -1623,8 +1888,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x080",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
index a2405ed640c9..7e6e6764f181 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DRAM Activate Count : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.ALL",
"PerPkg": "1",
@@ -9,30 +10,37 @@
},
{
"BriefDescription": "DRAM Activate Count : Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf1",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Activate Count : Underfill Read transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf4",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Activate Count : Write transaction on Page Empty or Page Miss : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_ACT_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf2",
"Unit": "IMC"
},
{
"BriefDescription": "CAS count for SubChannel 0, all CAS operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.ALL",
"PerPkg": "1",
@@ -41,6 +49,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 0, all reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.RD",
"PerPkg": "1",
@@ -49,6 +58,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 0 regular reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.RD_REG",
"PerPkg": "1",
@@ -57,6 +67,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 0 underfill reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL",
"PerPkg": "1",
@@ -65,6 +76,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 0, all writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.WR",
"PerPkg": "1",
@@ -73,22 +85,27 @@
},
{
"BriefDescription": "CAS count for SubChannel 0 regular writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "IMC"
},
{
"BriefDescription": "CAS count for SubChannel 0 auto-precharge writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "IMC"
},
{
"BriefDescription": "CAS count for SubChannel 1, all CAS operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.ALL",
"PerPkg": "1",
@@ -97,6 +114,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 1, all reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.RD",
"PerPkg": "1",
@@ -105,6 +123,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 1 regular reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.RD_REG",
"PerPkg": "1",
@@ -113,6 +132,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 1 underfill reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL",
"PerPkg": "1",
@@ -121,6 +141,7 @@
},
{
"BriefDescription": "CAS count for SubChannel 1, all writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.WR",
"PerPkg": "1",
@@ -129,22 +150,27 @@
},
{
"BriefDescription": "CAS count for SubChannel 1 regular writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd0",
"Unit": "IMC"
},
{
"BriefDescription": "CAS count for SubChannel 1 auto-precharge writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe0",
"Unit": "IMC"
},
{
"BriefDescription": "Number of DRAM DCLK clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
@@ -154,14 +180,17 @@
},
{
"BriefDescription": "Number of DRAM HCLK clock cycles while the event is enabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_HCLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Clockticks",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.ALL",
"PerPkg": "1",
@@ -170,6 +199,7 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to (?) : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.PGT",
"PerPkg": "1",
@@ -178,46 +208,57 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf1",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.UFILL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf4",
"Unit": "IMC"
},
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M_PRE_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf2",
"Unit": "IMC"
},
{
"BriefDescription": "Read buffer inserts on subchannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS.SCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IMC"
},
{
"BriefDescription": "Read buffer inserts on subchannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS.SCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IMC"
},
{
"BriefDescription": "Read buffer occupancy on subchannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M_RDB_OCCUPANCY_SCH0",
"PerPkg": "1",
@@ -225,6 +266,7 @@
},
{
"BriefDescription": "Read buffer occupancy on subchannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x1b",
"EventName": "UNC_M_RDB_OCCUPANCY_SCH1",
"PerPkg": "1",
@@ -232,22 +274,27 @@
},
{
"BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "IMC"
},
{
"BriefDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "IMC"
},
{
"BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH0",
"PerPkg": "1",
@@ -256,6 +303,7 @@
},
{
"BriefDescription": "Read Pending Queue inserts for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.SCH0_PCH1",
"PerPkg": "1",
@@ -264,6 +312,7 @@
},
{
"BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH0",
"PerPkg": "1",
@@ -272,6 +321,7 @@
},
{
"BriefDescription": "Read Pending Queue inserts for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.SCH1_PCH1",
"PerPkg": "1",
@@ -280,6 +330,7 @@
},
{
"BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH0",
"PerPkg": "1",
@@ -287,6 +338,7 @@
},
{
"BriefDescription": "Read pending queue occupancy for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_RPQ_OCCUPANCY_SCH0_PCH1",
"PerPkg": "1",
@@ -294,6 +346,7 @@
},
{
"BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH0",
"PerPkg": "1",
@@ -301,6 +354,7 @@
},
{
"BriefDescription": "Read pending queue occupancy for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_RPQ_OCCUPANCY_SCH1_PCH1",
"PerPkg": "1",
@@ -308,22 +362,27 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "IMC"
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "IMC"
},
{
"BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH0",
"PerPkg": "1",
@@ -332,6 +391,7 @@
},
{
"BriefDescription": "Write Pending Queue inserts for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.SCH0_PCH1",
"PerPkg": "1",
@@ -340,6 +400,7 @@
},
{
"BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH0",
"PerPkg": "1",
@@ -348,6 +409,7 @@
},
{
"BriefDescription": "Write Pending Queue inserts for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_INSERTS.SCH1_PCH1",
"PerPkg": "1",
@@ -356,6 +418,7 @@
},
{
"BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH0",
"PerPkg": "1",
@@ -363,6 +426,7 @@
},
{
"BriefDescription": "Write pending queue occupancy for subchannel 0, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_WPQ_OCCUPANCY_SCH0_PCH1",
"PerPkg": "1",
@@ -370,6 +434,7 @@
},
{
"BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH0",
"PerPkg": "1",
@@ -377,6 +442,7 @@
},
{
"BriefDescription": "Write pending queue occupancy for subchannel 1, pseudochannel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M_WPQ_OCCUPANCY_SCH1_PCH1",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-power.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-power.json
index e3a66166e28c..02e59f64a544 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-power.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "PCU Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json
index 371974c6d6c3..35cc5b6d41f2 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "200003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all pages sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "2000003",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -61,6 +69,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -69,6 +78,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
"SampleAfterValue": "1000003",
@@ -84,6 +95,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -91,6 +103,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -99,6 +112,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -107,6 +121,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -115,6 +130,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding for iside in PMH every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for iside in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals. Walks could be counted by edge detecting on this event, but would count restarted suspended walks.",
@@ -123,6 +139,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "LD_HEAD.DTLB_MISS_AT_RET",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
index 818e0664a3a6..5e5e2170fd8f 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of request that were not accepted into the L2Q because the L2Q is FULL.",
+ "Counter": "0,1",
"EventCode": "0x31",
"EventName": "CORE_REJECT_L2Q.ALL",
"PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2Q due to a full or nearly full w condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core?s dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event.)",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss.",
+ "Counter": "0,1",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of request from the L2 that were not accepted into the XQ",
+ "Counter": "0,1",
"EventCode": "0x30",
"EventName": "L2_REJECT_XQ.ALL",
"PublicDescription": "This event counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBS (L2 misses) and WOB (L2 write-back victims).",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "L2 cache request misses",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "This event counts the total number of L2 cache references and the number of L2 cache misses respectively.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "L2 cache requests from this core",
+ "Counter": "0,1",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "This event counts requests originating from the core that references a cache line in the L2 cache.",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "All Loads",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PublicDescription": "This event counts the number of load ops retired.",
@@ -47,6 +53,7 @@
},
{
"BriefDescription": "All Stores",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PublicDescription": "This event counts the number of store ops retired.",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Cross core or cross module hitm",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.HITM",
"PEBS": "1",
@@ -64,6 +72,7 @@
},
{
"BriefDescription": "Loads missed L1",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.L1_MISS_LOADS",
"PublicDescription": "This event counts the number of load ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Loads hit L2",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.L2_HIT_LOADS",
"PEBS": "1",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Loads missed L2",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.L2_MISS_LOADS",
"PEBS": "1",
@@ -90,6 +101,7 @@
},
{
"BriefDescription": "Loads missed UTLB",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.UTLB_MISS",
"PublicDescription": "This event counts the number of load ops retired that had UTLB miss.",
@@ -98,6 +110,7 @@
},
{
"BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
@@ -106,6 +119,7 @@
},
{
"BriefDescription": "Counts any code reads (demand & prefetch) that have any response type.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Counts any code reads (demand & prefetch) that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "Counts any code reads (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -133,6 +149,7 @@
},
{
"BriefDescription": "Counts any code reads (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -142,6 +159,7 @@
},
{
"BriefDescription": "Counts any code reads (demand & prefetch) that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -151,6 +169,7 @@
},
{
"BriefDescription": "Counts any data read (demand & prefetch) that have any response type.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -160,6 +179,7 @@
},
{
"BriefDescription": "Counts any data read (demand & prefetch) that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -169,6 +189,7 @@
},
{
"BriefDescription": "Counts any data read (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -178,6 +199,7 @@
},
{
"BriefDescription": "Counts any data read (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Counts any data read (demand & prefetch) that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -196,6 +219,7 @@
},
{
"BriefDescription": "Counts any request that have any response type.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -205,6 +229,7 @@
},
{
"BriefDescription": "Counts any request that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -214,6 +239,7 @@
},
{
"BriefDescription": "Counts any request that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -223,6 +249,7 @@
},
{
"BriefDescription": "Counts any request that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -232,6 +259,7 @@
},
{
"BriefDescription": "Counts any rfo reads (demand & prefetch) that have any response type.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -241,6 +269,7 @@
},
{
"BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -250,6 +279,7 @@
},
{
"BriefDescription": "Counts any rfo reads (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -259,6 +289,7 @@
},
{
"BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -268,6 +299,7 @@
},
{
"BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -277,6 +309,7 @@
},
{
"BriefDescription": "Counts writeback (modified to exclusive) that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -286,6 +319,7 @@
},
{
"BriefDescription": "Counts writeback (modified to exclusive) that miss L2 with no details on snoop-related information.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -295,6 +329,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch instruction cacheline that have any response type.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -304,6 +339,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -313,6 +349,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -322,6 +359,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -331,6 +369,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch instruction cacheline that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -340,6 +379,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch data read that have any response type.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -349,6 +389,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch data read that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -358,6 +399,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch data read that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -367,6 +409,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch data read that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -376,6 +419,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch data read that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -385,6 +429,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch data read that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -394,6 +439,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -403,6 +449,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch RFOs that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -412,6 +459,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -421,6 +469,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -430,6 +479,7 @@
},
{
"BriefDescription": "Counts demand and DCU prefetch RFOs that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -439,6 +489,7 @@
},
{
"BriefDescription": "Counts demand reads of partial cache lines (including UC and WC) that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -448,6 +499,7 @@
},
{
"BriefDescription": "Countsof demand RFO requests to write to partial cache lines that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -457,6 +509,7 @@
},
{
"BriefDescription": "Counts DCU hardware prefetcher data read that have any response type.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -466,6 +519,7 @@
},
{
"BriefDescription": "Counts DCU hardware prefetcher data read that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -475,6 +529,7 @@
},
{
"BriefDescription": "Counts DCU hardware prefetcher data read that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -484,6 +539,7 @@
},
{
"BriefDescription": "Counts DCU hardware prefetcher data read that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -493,6 +549,7 @@
},
{
"BriefDescription": "Counts DCU hardware prefetcher data read that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -502,6 +559,7 @@
},
{
"BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -511,6 +569,7 @@
},
{
"BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -520,6 +579,7 @@
},
{
"BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -529,6 +589,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -538,6 +599,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -547,6 +609,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -556,6 +619,7 @@
},
{
"BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -565,6 +629,7 @@
},
{
"BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -574,6 +639,7 @@
},
{
"BriefDescription": "Counts RFO requests generated by L2 prefetchers that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -583,6 +649,7 @@
},
{
"BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -592,6 +659,7 @@
},
{
"BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2 with a snoop miss response.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -601,6 +669,7 @@
},
{
"BriefDescription": "Counts streaming store that miss L2.",
+ "Counter": "0,1",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
@@ -610,6 +679,7 @@
},
{
"BriefDescription": "Any reissued load uops",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "REHABQ.ANY_LD",
"PublicDescription": "This event counts the number of load uops reissued from Rehabq.",
@@ -618,6 +688,7 @@
},
{
"BriefDescription": "Any reissued store uops",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "REHABQ.ANY_ST",
"PublicDescription": "This event counts the number of store uops reissued from Rehabq.",
@@ -626,6 +697,7 @@
},
{
"BriefDescription": "Loads blocked due to store data not ready",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "REHABQ.LD_BLOCK_STD_NOTREADY",
"PublicDescription": "This event counts the cases where a forward was technically possible, but did not occur because the store data was not available at the right time.",
@@ -634,6 +706,7 @@
},
{
"BriefDescription": "Loads blocked due to store forward restriction",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "REHABQ.LD_BLOCK_ST_FORWARD",
"PEBS": "1",
@@ -643,6 +716,7 @@
},
{
"BriefDescription": "Load uops that split cache line boundary",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "REHABQ.LD_SPLITS",
"PEBS": "1",
@@ -652,6 +726,7 @@
},
{
"BriefDescription": "Uops with lock semantics",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "REHABQ.LOCK",
"PublicDescription": "This event counts the number of retired memory operations with lock semantics. These are either implicit locked instructions such as the XCHG instruction or instructions with an explicit LOCK prefix (0xF0).",
@@ -660,6 +735,7 @@
},
{
"BriefDescription": "Store address buffer full",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "REHABQ.STA_FULL",
"PublicDescription": "This event counts the number of retired stores that are delayed because there is not a store address buffer available.",
@@ -668,6 +744,7 @@
},
{
"BriefDescription": "Store uops that split cache line boundary",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "REHABQ.ST_SPLITS",
"PublicDescription": "This event counts the number of retire stores that experienced cache line boundary splits.",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/counter.json b/tools/perf/pmu-events/arch/x86/silvermont/counter.json
new file mode 100644
index 000000000000..eb89b55f31bd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json b/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json
index f2b1e8f08d68..aa4faf110512 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Stalls due to FP assists",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "This event counts the number of times that pipeline stalled due to FP operations needing assists.",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/frontend.json b/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
index cd6ed3f59e26..fc6cfb291249 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of baclears",
+ "Counter": "0,1",
"EventCode": "0xE6",
"EventName": "BACLEARS.ALL",
"PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.ANY event counts the number of baclears for any type of branch.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of JCC baclears",
+ "Counter": "0,1",
"EventCode": "0xE6",
"EventName": "BACLEARS.COND",
"PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.COND event counts the number of JCC (Jump on Conditional Code) baclears.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of RETURN baclears",
+ "Counter": "0,1",
"EventCode": "0xE6",
"EventName": "BACLEARS.RETURN",
"PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.RETURN event counts the number of RETURN baclears.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts the number of times a decode restriction reduced the decode throughput due to wrong instruction length prediction",
+ "Counter": "0,1",
"EventCode": "0xE9",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
"PublicDescription": "Counts the number of times a decode restriction reduced the decode throughput due to wrong instruction length prediction.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Instruction fetches",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"PublicDescription": "This event counts all instruction fetches, not including most uncacheable\r\nfetches.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Instruction fetches from Icache",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "This event counts all instruction fetches from the instruction cache.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Icache miss",
+ "Counter": "0,1",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts all instruction fetches that miss the Instruction cache or produce memory requests. This includes uncacheable fetches. An instruction fetch miss is counted only once and not once for every cycle it is outstanding.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Counts the number of times entered into a ucode flow in the FEC. Includes inserted flows due to front-end detected faults or assists. Speculative count.",
+ "Counter": "0,1",
"EventCode": "0xE7",
"EventName": "MS_DECODED.MS_ENTRY",
"PublicDescription": "Counts the number of times the MSROM starts a flow of UOPS. It does not count every time a UOP is read from the microcode ROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort. The event will count MSROM startups for UOPS that are speculative, and subsequently cleared by branch mispredict or machine clear. Background: UOPS are produced by two mechanisms. Either they are generated by hardware that decodes instructions into UOPS, or they are delivered by a ROM (called the MSROM) that holds UOPS associated with a specific instruction. MSROM UOPS might also be delivered in response to some condition such as a fault or other exceptional condition. This event is an excellent mechanism for detecting instructions that require the use of MSROM instructions.",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/memory.json b/tools/perf/pmu-events/arch/x86/silvermont/memory.json
index 15ea45187210..0f5fba43da4c 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Stalls due to Memory ordering",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of times that pipeline was cleared due to memory ordering issues.",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/other.json b/tools/perf/pmu-events/arch/x86/silvermont/other.json
index cff113adb823..4db59d84c144 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/other.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles code-fetch stalled due to any reason.",
+ "Counter": "0,1",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ALL",
"PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss.",
+ "Counter": "0,1",
"EventCode": "0x86",
"EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
"PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
index 2d4214bf9e39..48ca8bb2656b 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of branch instructions retired...",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of taken branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"PEBS": "2",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "1",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Counts the number of far branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "1",
@@ -45,6 +50,7 @@
},
{
"BriefDescription": "Counts the number of JCC branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "1",
@@ -54,6 +60,7 @@
},
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -63,6 +70,7 @@
},
{
"BriefDescription": "Counts the number of near relative CALL branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
@@ -72,6 +80,7 @@
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "1",
@@ -81,6 +90,7 @@
},
{
"BriefDescription": "Counts the number of taken JCC branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -90,6 +100,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted JCC branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "1",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted taken JCC branch instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. In systems with a constant core frequency, this event can give you a measurement of the elapsed time while the core was not in halt state by dividing the event count by the core frequency. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
"SampleAfterValue": "2000003",
@@ -150,6 +167,7 @@
},
{
"BriefDescription": "Core cycles when core is not halted",
+ "Counter": "0,1",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PublicDescription": "This event counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time.",
@@ -157,6 +175,7 @@
},
{
"BriefDescription": "Reference cycles when core is not halted",
+ "Counter": "0,1",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF",
"PublicDescription": "This event counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.",
@@ -165,6 +184,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "Counter": "Fixed counter 3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
"SampleAfterValue": "2000003",
@@ -172,6 +192,7 @@
},
{
"BriefDescription": "Cycles the divider is busy. Does not imply a stall waiting for the divider.",
+ "Counter": "0,1",
"EventCode": "0xCD",
"EventName": "CYCLES_DIV_BUSY.ALL",
"PublicDescription": "Cycles the divider is busy.This event counts the cycles when the divide unit is unable to accept a new divide UOP because it is busy processing a previously dispatched UOP. The cycles will be counted irrespective of whether or not another divide UOP is waiting to enter the divide unit (from the RS). This event might count cycles while a divide is in progress even if the RS is empty. The divide instruction is one of the longest latency instructions in the machine. Hence, it has a special event associated with it to help determine if divides are delaying the retirement of instructions.",
@@ -180,6 +201,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "Counter": "Fixed counter 1",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps. Background: Modern microprocessors employ extensive pipelining and speculative techniques. Since sometimes an instruction is started but never completed, the notion of \"retirement\" is introduced. A retired instruction is one that commits its states. Or stated differently, an instruction might be abandoned at some point. No instruction is truly finished until it retires. This counter measures the number of completed instructions. The fixed event is INST_RETIRED.ANY and the programmable event is INST_RETIRED.ANY_P.",
"SampleAfterValue": "2000003",
@@ -187,6 +209,7 @@
},
{
"BriefDescription": "Instructions retired",
+ "Counter": "0,1",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PublicDescription": "This event counts the number of instructions that retire execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers.",
@@ -194,6 +217,7 @@
},
{
"BriefDescription": "Counts all machine clears",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.ALL",
"PublicDescription": "Machine clears happen when something happens in the machine that causes the hardware to need to take special care to get the right answer. When such a condition is signaled on an instruction, the front end of the machine is notified that it must restart, so no more instructions will be decoded from the current path. All instructions \"older\" than this one will be allowed to finish. This instruction and all \"younger\" instructions must be cleared, since they must not be allowed to complete. Essentially, the hardware waits until the problematic instruction is the oldest instruction in the machine. This means all older instructions are retired, and all pending stores (from older instructions) are completed. Then the new path of instructions from the front end are allowed to start into the machine. There are many conditions that might cause a machine clear (including the receipt of an interrupt, or a trap or a fault). All those conditions (including but not limited to MACHINE_CLEARS.MEMORY_ORDERING, MACHINE_CLEARS.SMC, and MACHINE_CLEARS.FP_ASSIST) are captured in the ANY event. In addition, some conditions can be specifically counted (i.e. SMC, MEMORY_ORDERING, FP_ASSIST). However, the sum of SMC, MEMORY_ORDERING, and FP_ASSIST machine clears will not necessarily equal the number of ANY.",
@@ -202,6 +226,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected",
+ "Counter": "0,1",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "This event counts the number of times that a program writes to a code section. Self-modifying code causes a severe penalty in all Intel? architecture processors.",
@@ -210,6 +235,7 @@
},
{
"BriefDescription": "Counts the number of cycles when no uops are allocated for any reason.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.ALL",
"PublicDescription": "The NO_ALLOC_CYCLES.ALL event counts the number of cycles when the front-end does not provide any instructions to be allocated for any reason. This event indicates the cycles where an allocation stalls occurs, and no UOPS are allocated in that cycle.",
@@ -218,6 +244,7 @@
},
{
"BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
"PublicDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted.",
@@ -226,6 +253,7 @@
},
{
"BriefDescription": "Counts the number of cycles when no uops are allocated, the IQ is empty, and no other condition is blocking allocation.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.NOT_DELIVERED",
"PublicDescription": "The NO_ALLOC_CYCLES.NOT_DELIVERED event is used to measure front-end inefficiencies, i.e. when front-end of the machine is not delivering micro-ops to the back-end and the back-end is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into micro-ops (uops) in machine understandable format and putting them into a micro-op queue to be consumed by back end. The back-end then takes these micro-ops, allocates the required resources. When all resources are ready, micro-ops are executed. If the back-end is not ready to accept micro-ops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more UOPS. This event counts the cycles only when back-end is requesting more uops and front-end is not able to provide them. Some examples of conditions that cause front-end efficiencies are: Icache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth.",
@@ -234,6 +262,7 @@
},
{
"BriefDescription": "Counts the number of cycles when no uops are allocated and a RATstall is asserted.",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.RAT_STALL",
"SampleAfterValue": "200003",
@@ -241,6 +270,7 @@
},
{
"BriefDescription": "Counts the number of cycles when no uops are allocated and the ROB is full (less than 2 entries available)",
+ "Counter": "0,1",
"EventCode": "0xCA",
"EventName": "NO_ALLOC_CYCLES.ROB_FULL",
"PublicDescription": "Counts the number of cycles when no uops are allocated and the ROB is full (less than 2 entries available).",
@@ -249,6 +279,7 @@
},
{
"BriefDescription": "Counts the number of cycles the Alloc pipeline is stalled when any one of the RSs (IEC, FPC and MEC) is full. This event is a superset of all the individual RS stall event counts.",
+ "Counter": "0,1",
"EventCode": "0xCB",
"EventName": "RS_FULL_STALL.ALL",
"SampleAfterValue": "200003",
@@ -256,6 +287,7 @@
},
{
"BriefDescription": "Counts the number of cycles and allocation pipeline is stalled and is waiting for a free MEC reservation station entry. The cycles should be appropriately counted in case of the cracked ops e.g. In case of a cracked load-op, the load portion is sent to M",
+ "Counter": "0,1",
"EventCode": "0xCB",
"EventName": "RS_FULL_STALL.MEC",
"PublicDescription": "Counts the number of cycles and allocation pipeline is stalled and is waiting for a free MEC reservation station entry. The cycles should be appropriately counted in case of the cracked ops e.g. In case of a cracked load-op, the load portion is sent to M.",
@@ -264,6 +296,7 @@
},
{
"BriefDescription": "Micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ALL",
"PublicDescription": "This event counts the number of micro-ops retired. The processor decodes complex macro instructions into a sequence of simpler micro-ops. Most instructions are composed of one or two micro-ops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. In some cases micro-op sequences are fused or whole instructions are fused into one micro-op. See other UOPS_RETIRED events for differentiating retired fused and non-fused micro-ops.",
@@ -272,6 +305,7 @@
},
{
"BriefDescription": "MSROM micro-ops retired",
+ "Counter": "0,1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MS",
"PublicDescription": "This event counts the number of micro-ops retired that were supplied from MSROM.",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
index 1be3fa5c4ad3..b50cee3a5e4c 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Loads missed DTLB",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"PEBS": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Total cycles for all the page walks. (I-side and D-side)",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.CYCLES",
"PublicDescription": "This event counts every cycle when a data (D) page walk or instruction (I) page walk is in progress. Since a pagewalk implies a TLB miss, the approximate cost of a TLB miss can be determined from this event.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Duration of D-side page-walks in core cycles",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.D_SIDE_CYCLES",
"PublicDescription": "This event counts every cycle when a D-side (walks due to a load) page walk is in progress. Page walk duration divided by number of page walks is the average duration of page-walks.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "D-side page-walks",
+ "Counter": "0,1",
"EdgeDetect": "1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.D_SIDE_WALKS",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Duration of I-side page-walks in core cycles",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.I_SIDE_CYCLES",
"PublicDescription": "This event counts every cycle when a I-side (walks due to an instruction fetch) page walk is in progress. Page walk duration divided by number of page walks is the average duration of page-walks.",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "I-side page-walks",
+ "Counter": "0,1",
"EdgeDetect": "1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.I_SIDE_WALKS",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "Total page walks that are completed (I-side and D-side)",
+ "Counter": "0,1",
"EdgeDetect": "1",
"EventCode": "0x05",
"EventName": "PAGE_WALKS.WALKS",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
index ce592d871949..3dd61a325859 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L1D miss outstandings duration in cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -35,6 +39,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.NON_SILENT",
"SampleAfterValue": "200003",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.SILENT",
"SampleAfterValue": "200003",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.USELESS_HWPF",
"SampleAfterValue": "200003",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.USELESS_PREF",
@@ -80,6 +90,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -88,6 +99,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Demand requests that miss L2 cache.",
@@ -104,6 +117,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PublicDescription": "Demand requests to L2 cache.",
@@ -112,6 +126,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
@@ -120,6 +135,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -136,6 +153,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -144,6 +162,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
@@ -152,6 +171,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -160,6 +180,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "All requests that miss L2 cache.",
@@ -168,6 +189,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_HIT",
"PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
@@ -176,6 +198,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_MISS",
"PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
@@ -184,6 +207,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "All L2 requests.",
@@ -192,6 +216,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -200,6 +225,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -208,6 +234,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "Counts L2 writebacks that access L2 cache.",
@@ -216,6 +243,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
"Errata": "SKL057",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
@@ -225,6 +253,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
"Errata": "SKL057",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
@@ -234,6 +263,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -244,6 +274,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -254,6 +285,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -264,6 +296,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -273,6 +306,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -283,6 +317,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -293,6 +328,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -303,6 +339,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -313,6 +350,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
@@ -323,6 +361,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
@@ -333,6 +372,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -342,6 +382,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -352,6 +393,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -361,6 +403,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -371,6 +414,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -381,6 +425,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -391,6 +436,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -401,6 +447,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -411,6 +458,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -421,6 +469,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -431,6 +480,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -439,6 +489,7 @@
},
{
"BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
@@ -447,6 +498,7 @@
},
{
"BriefDescription": "Cacheable and non-cacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
@@ -455,6 +507,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -463,6 +516,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -471,6 +525,7 @@
},
{
"BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
@@ -479,6 +534,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
@@ -487,6 +543,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -496,6 +553,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
@@ -505,6 +563,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -514,6 +573,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -523,6 +583,7 @@
},
{
"BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
@@ -531,6 +592,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
@@ -539,6 +601,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
@@ -547,6 +610,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
@@ -555,6 +619,7 @@
},
{
"BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
@@ -563,6 +628,7 @@
},
{
"BriefDescription": "Counts all demand code reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -572,6 +638,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -581,6 +648,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -590,6 +658,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -599,6 +668,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -608,6 +678,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -617,6 +688,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -626,6 +698,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -635,6 +708,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -644,6 +718,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -653,6 +728,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -662,6 +738,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -671,6 +748,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -680,6 +758,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -689,6 +768,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -698,6 +778,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -707,6 +788,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -716,6 +798,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -725,6 +808,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -734,6 +818,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -743,6 +828,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -752,6 +838,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -761,6 +848,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -770,6 +858,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -779,6 +868,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -788,6 +878,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -797,6 +888,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -806,6 +898,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -815,6 +908,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -824,6 +918,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -833,6 +928,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -842,6 +938,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -851,6 +948,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -860,6 +958,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -869,6 +968,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -878,6 +978,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -887,6 +988,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -896,6 +998,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -905,6 +1008,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -914,6 +1018,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -923,6 +1028,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -932,6 +1038,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -941,6 +1048,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -950,6 +1058,7 @@
},
{
"BriefDescription": "Counts demand data reads have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -959,6 +1068,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -968,6 +1078,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -977,6 +1088,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -986,6 +1098,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -995,6 +1108,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1004,6 +1118,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1013,6 +1128,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1022,6 +1138,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1031,6 +1148,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1040,6 +1158,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1049,6 +1168,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1058,6 +1178,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1067,6 +1188,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1076,6 +1198,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1085,6 +1208,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1094,6 +1218,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1103,6 +1228,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1112,6 +1238,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1121,6 +1248,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1130,6 +1258,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1139,6 +1268,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1148,6 +1278,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1157,6 +1288,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1166,6 +1298,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1175,6 +1308,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1184,6 +1318,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1193,6 +1328,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1202,6 +1338,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1211,6 +1348,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1220,6 +1358,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1229,6 +1368,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1238,6 +1378,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1247,6 +1388,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1256,6 +1398,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1265,6 +1408,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1274,6 +1418,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1283,6 +1428,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1292,6 +1438,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1301,6 +1448,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1310,6 +1458,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1319,6 +1468,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1328,6 +1478,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1337,6 +1488,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1346,6 +1498,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1355,6 +1508,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1364,6 +1518,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1373,6 +1528,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1382,6 +1538,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1391,6 +1548,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1400,6 +1558,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1409,6 +1568,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1418,6 +1578,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1427,6 +1588,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1436,6 +1598,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1445,6 +1608,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1454,6 +1618,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1463,6 +1628,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1472,6 +1638,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1481,6 +1648,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1490,6 +1658,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1499,6 +1668,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1508,6 +1678,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1517,6 +1688,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1526,6 +1698,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1535,6 +1708,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1544,6 +1718,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1553,6 +1728,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1562,6 +1738,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1571,6 +1748,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1580,6 +1758,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1589,6 +1768,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1598,6 +1778,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1607,6 +1788,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1616,6 +1798,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1625,6 +1808,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1634,6 +1818,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1643,6 +1828,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1652,6 +1838,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1661,6 +1848,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1670,6 +1858,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1679,6 +1868,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1688,6 +1878,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1697,6 +1888,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1706,6 +1898,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1715,6 +1908,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1724,6 +1918,7 @@
},
{
"BriefDescription": "Counts any other requests have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1733,6 +1928,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1742,6 +1938,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1751,6 +1948,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1760,6 +1958,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1769,6 +1968,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1778,6 +1978,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1787,6 +1988,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1796,6 +1998,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1805,6 +2008,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1814,6 +2018,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1823,6 +2028,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1832,6 +2038,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1841,6 +2048,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1850,6 +2058,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1859,6 +2068,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1868,6 +2078,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1877,6 +2088,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1886,6 +2098,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1895,6 +2108,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1904,6 +2118,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1913,6 +2128,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1922,6 +2138,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1931,6 +2148,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1940,6 +2158,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1949,6 +2168,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -1958,6 +2178,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1967,6 +2188,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1976,6 +2198,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1985,6 +2208,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1994,6 +2218,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2003,6 +2228,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2012,6 +2238,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2021,6 +2248,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2030,6 +2258,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2039,6 +2268,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2048,6 +2278,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -2057,6 +2288,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2066,6 +2298,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -2075,6 +2308,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -2084,6 +2318,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2093,6 +2328,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -2102,6 +2338,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2111,6 +2348,7 @@
},
{
"BriefDescription": "Number of cache line split locks sent to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
@@ -2118,7 +2356,16 @@
"UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"SampleAfterValue": "2000003",
@@ -2126,6 +2373,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"SampleAfterValue": "2000003",
@@ -2133,6 +2381,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T0",
"SampleAfterValue": "2000003",
@@ -2140,6 +2389,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/counter.json b/tools/perf/pmu-events/arch/x86/skylake/counter.json
new file mode 100644
index 000000000000..1be6522e2bbc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/counter.json
@@ -0,0 +1,22 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "ARB",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "cbox_0",
+ "CountersNumFixed": 1,
+ "CountersNumGeneric": "0"
+ }
+]
\ No newline at end of file
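The counter.json file added above declares, per PMU unit, how many fixed and generic counters are available, while the "Counter" fields added to the event entries list which counters each event can be scheduled on. The short Python sketch below cross-checks the two, purely as an illustration: the checkout path, helper names, and the check itself are assumptions for this example and are not part of the patch or of the kernel's jevents tooling.

#!/usr/bin/env python3
# Standalone sketch (assumed layout, not the kernel's jevents tooling): verify that
# each core event's "Counter" list fits within the generic counters that the new
# counter.json declares for the "core" unit.
import json
from pathlib import Path

ARCH_DIR = Path("tools/perf/pmu-events/arch/x86/skylake")  # assumed checkout path

def core_generic_counters(arch_dir: Path) -> int:
    # counter.json is a list of {"Unit", "CountersNumFixed", "CountersNumGeneric"} entries.
    for unit in json.loads((arch_dir / "counter.json").read_text()):
        if unit["Unit"] == "core":
            return int(unit["CountersNumGeneric"])
    raise KeyError("no 'core' unit found in counter.json")

def check_events(arch_dir: Path) -> None:
    limit = core_generic_counters(arch_dir)
    for path in sorted(arch_dir.glob("*.json")):
        if path.name == "counter.json":
            continue
        data = json.loads(path.read_text())
        if not isinstance(data, list):
            continue  # skip non-event files such as plain mappings
        for event in data:
            # "Counter" is a comma-separated list such as "0,1,2,3"; fixed-counter
            # entries (non-numeric tokens) are ignored by the digit check below.
            tokens = [t.strip() for t in str(event.get("Counter", "")).split(",")]
            bad = [t for t in tokens if t.isdigit() and int(t) >= limit]
            if bad:
                print(f"{path.name}: {event.get('EventName', '?')} uses counters {bad}, limit {limit}")

if __name__ == "__main__":
    check_events(ARCH_DIR)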
diff --git a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
index 5891bd74af60..f1ecda8aed07 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instruction retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Counts once for most SIMD scalar computational single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "2000003",
@@ -72,6 +81,7 @@
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index d6f543471b24..0e1dedce00f2 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
"PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses. Note: Invoking MITE requires two or three cycles delay.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -66,6 +73,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -76,6 +84,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -86,6 +95,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -97,6 +107,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -107,6 +118,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -118,6 +130,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -128,6 +141,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -138,6 +152,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -149,6 +164,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
@@ -159,6 +175,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
@@ -169,6 +186,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -180,6 +198,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -190,6 +209,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -200,6 +220,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -210,6 +231,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -221,6 +243,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -232,6 +255,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
@@ -240,6 +264,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_HIT",
"SampleAfterValue": "200003",
@@ -247,6 +272,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_MISS",
"SampleAfterValue": "200003",
@@ -254,6 +280,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"SampleAfterValue": "200003",
@@ -261,6 +288,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"SampleAfterValue": "200003",
@@ -268,6 +296,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.DSB_CYCLES_OK]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -277,6 +306,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.DSB_CYCLES_ANY]",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -286,6 +316,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -295,6 +326,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -304,6 +336,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -313,6 +346,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -322,6 +356,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
@@ -331,6 +366,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
@@ -339,6 +375,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -348,6 +385,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -356,6 +394,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -365,6 +404,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -374,6 +414,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
@@ -382,6 +423,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -392,6 +434,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
@@ -400,6 +443,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
@@ -408,6 +452,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -417,6 +462,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -426,6 +472,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -435,6 +482,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -444,6 +492,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
index f047862f9735..5e61d3e291ca 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED",
"PEBS": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_EVENTS",
"SampleAfterValue": "2000003",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_MEM",
"SampleAfterValue": "2000003",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_TIMER",
"SampleAfterValue": "2000003",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"SampleAfterValue": "2000003",
@@ -62,6 +70,7 @@
},
{
"BriefDescription": "Number of times an HLE execution successfully committed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.COMMIT",
"PublicDescription": "Number of times HLE commit succeeded.",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.START",
"PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
@@ -78,6 +88,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"Errata": "SKL089",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
@@ -87,6 +98,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -111,6 +124,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -123,6 +137,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -135,6 +150,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -147,6 +163,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -159,6 +176,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -171,6 +189,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -183,6 +202,7 @@
},
{
"BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"PublicDescription": "Demand Data Read requests who miss L3 cache.",
@@ -191,6 +211,7 @@
},
{
"BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
@@ -199,6 +220,7 @@
},
{
"BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
@@ -206,6 +228,7 @@
},
{
"BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
@@ -214,6 +237,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -223,6 +247,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -232,6 +257,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -241,6 +267,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -250,6 +277,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -259,6 +287,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -268,6 +297,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -277,6 +307,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -286,6 +317,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -295,6 +327,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -304,6 +337,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -313,6 +347,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -322,6 +357,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -331,6 +367,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -340,6 +377,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -349,6 +387,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -358,6 +397,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -367,6 +407,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -376,6 +417,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -385,6 +427,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -394,6 +437,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -403,6 +447,7 @@
},
{
"BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -412,6 +457,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -421,6 +467,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -430,6 +477,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -439,6 +487,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -448,6 +497,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -457,6 +507,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -466,6 +517,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -475,6 +527,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -484,6 +537,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -493,6 +547,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -502,6 +557,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -511,6 +567,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -520,6 +577,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -529,6 +587,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -538,6 +597,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -547,6 +607,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -556,6 +617,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -565,6 +627,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -574,6 +637,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -583,6 +647,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -592,6 +657,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -601,6 +667,7 @@
},
{
"BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -610,6 +677,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -619,6 +687,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -628,6 +697,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -637,6 +707,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -646,6 +717,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -655,6 +727,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -664,6 +737,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -673,6 +747,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -682,6 +757,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -691,6 +767,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -700,6 +777,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -709,6 +787,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -718,6 +797,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -727,6 +807,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -736,6 +817,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -745,6 +827,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -754,6 +837,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -763,6 +847,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -772,6 +857,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -781,6 +867,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -790,6 +877,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -799,6 +887,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -808,6 +897,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -817,6 +907,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -826,6 +917,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -835,6 +927,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -844,6 +937,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -853,6 +947,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -862,6 +957,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -871,6 +967,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -880,6 +977,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -889,6 +987,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -898,6 +997,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -907,6 +1007,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -916,6 +1017,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -925,6 +1027,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -934,6 +1037,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -943,6 +1047,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -952,6 +1057,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
@@ -961,6 +1067,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -970,6 +1077,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -979,6 +1087,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -988,6 +1097,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -997,6 +1107,7 @@
},
{
"BriefDescription": "Counts any other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1006,6 +1117,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "2",
@@ -1015,6 +1127,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -1023,6 +1136,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -1031,6 +1145,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
@@ -1039,6 +1154,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_TIMER",
"SampleAfterValue": "2000003",
@@ -1046,6 +1162,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -1054,6 +1171,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Number of times RTM commit succeeded.",
@@ -1062,6 +1180,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
@@ -1070,6 +1189,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
@@ -1077,6 +1197,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -1085,6 +1206,7 @@
},
{
"BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -1093,6 +1215,7 @@
},
{
"BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC4",
"PublicDescription": "RTM region detected inside HLE.",
@@ -1101,6 +1224,7 @@
},
{
"BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC5",
"PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
@@ -1109,6 +1233,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY",
"SampleAfterValue": "2000003",
@@ -1116,6 +1241,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Number of times a TSX line had a cache conflict.",
@@ -1124,6 +1250,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
@@ -1132,6 +1259,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
@@ -1140,6 +1268,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
@@ -1148,6 +1277,7 @@
},
{
"BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
@@ -1156,6 +1286,7 @@
},
{
"BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"PublicDescription": "Number of times we could not allocate Lock Buffer.",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/metricgroups.json b/tools/perf/pmu-events/arch/x86/skylake/metricgroups.json
index 5452a1448ded..3a88260194d1 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/metricgroups.json
@@ -5,7 +5,20 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/other.json b/tools/perf/pmu-events/arch/x86/skylake/other.json
index d75d53279b4e..f14eeeb85d39 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.RECEIVED",
"PublicDescription": "Counts the number of hardware interruptions received by the processor.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
+ "Counter": "0,1,2,3",
"EventCode": "0x09",
"EventName": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index fe202d1e368a..6d57930afbfd 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_ACTIVE",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired. [This event is alias to BR_INST_RETIRED.CONDITIONAL]",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.COND",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired. [This event is alias to BR_INST_RETIRED.COND]",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
@@ -65,6 +72,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
@@ -75,6 +83,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
@@ -85,6 +94,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
@@ -95,6 +105,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
@@ -104,6 +115,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
@@ -112,6 +124,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -120,6 +133,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
@@ -127,6 +141,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -136,6 +151,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -145,6 +161,7 @@
},
{
"BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -154,6 +171,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -162,6 +180,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "25003",
@@ -169,6 +188,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"SampleAfterValue": "25003",
@@ -177,6 +197,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "25003",
@@ -184,6 +205,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "25003",
@@ -191,6 +213,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -198,6 +221,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "25003",
@@ -206,6 +230,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "25003",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x3C",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -230,12 +257,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -244,12 +273,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -258,6 +289,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -266,6 +298,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -274,6 +307,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -282,6 +316,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -290,6 +325,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "20",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -298,6 +334,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -306,6 +343,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -314,6 +352,7 @@
},
{
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -322,6 +361,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -330,6 +370,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -338,6 +379,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"SampleAfterValue": "2000003",
@@ -345,6 +387,7 @@
},
{
"BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -353,6 +396,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
@@ -361,6 +405,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -369,6 +414,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
@@ -376,6 +422,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
@@ -384,15 +431,17 @@
},
{
"BriefDescription": "Number of all retired NOP instructions.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.NOP",
- "PEBS": "2",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
@@ -403,6 +452,7 @@
},
{
"BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "Counter": "0,2,3",
"CounterMask": "10",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
@@ -415,6 +465,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -425,6 +476,7 @@
},
{
"BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"SampleAfterValue": "2000003",
@@ -432,6 +484,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -441,6 +494,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
@@ -448,6 +502,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -456,6 +511,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -464,6 +520,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
@@ -472,6 +529,7 @@
},
{
"BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -480,6 +538,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_OK]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -489,6 +548,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -498,6 +558,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_4_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_OK",
@@ -507,6 +568,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -515,6 +577,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -524,6 +587,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -532,6 +596,7 @@
},
{
"BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY",
"SampleAfterValue": "100003",
@@ -539,6 +604,7 @@
},
{
"BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
"PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
@@ -547,6 +613,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "Counts resource-related stall cycles.",
@@ -555,6 +622,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -563,6 +631,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
@@ -571,6 +640,7 @@
},
{
"BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.PAUSE_INST",
"SampleAfterValue": "2000003",
@@ -578,6 +648,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
@@ -586,6 +657,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -597,6 +669,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
@@ -605,6 +678,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
@@ -613,6 +687,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
@@ -621,6 +696,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
@@ -629,6 +705,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
@@ -637,6 +714,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
@@ -645,6 +723,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
@@ -653,6 +732,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
@@ -661,6 +741,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Number of uops executed from any thread.",
@@ -669,6 +750,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -677,6 +759,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -685,6 +768,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -693,6 +777,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -701,6 +786,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
@@ -710,6 +796,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
@@ -719,6 +806,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
@@ -728,6 +816,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
@@ -737,6 +826,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
@@ -746,6 +836,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -756,6 +847,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.THREAD",
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
@@ -764,6 +856,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -772,6 +865,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -780,6 +874,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"SampleAfterValue": "2000003",
@@ -787,6 +882,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -797,6 +893,7 @@
},
{
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
@@ -805,6 +902,7 @@
},
{
"BriefDescription": "Number of macro-fused uops retired. (non precise)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
"PublicDescription": "Counts the number of macro-fused uops retired. (non precise)",
@@ -813,6 +911,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PublicDescription": "Counts the retirement slots used.",
@@ -821,6 +920,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -831,6 +931,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 3af71b84bb9d..4e954fe8547c 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -89,7 +89,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -98,7 +98,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - tma_frontend_bound - (UOPS_ISSUED.ANY + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -119,7 +119,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -157,7 +157,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(18.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 16.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -178,7 +178,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "16.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -196,7 +196,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -227,14 +227,14 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -243,7 +243,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -253,7 +253,7 @@
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "22 * tma_info_system_core_frequency * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -263,7 +263,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -276,7 +276,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -291,6 +291,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
"MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
"MetricName": "tma_few_uops_instructions",
@@ -319,7 +320,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -329,7 +330,7 @@
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -357,7 +358,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -367,7 +368,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
@@ -386,7 +387,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -428,13 +429,21 @@
"MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
@@ -445,39 +454,33 @@
"PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
- "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5"
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
@@ -485,23 +488,23 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
@@ -509,8 +512,8 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
@@ -518,7 +521,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
@@ -527,18 +530,25 @@
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -592,7 +602,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -609,7 +619,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 4 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
@@ -670,7 +680,7 @@
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -752,12 +762,12 @@
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -790,7 +800,7 @@
"MetricName": "tma_info_memory_fb_hpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
@@ -808,7 +818,7 @@
"MetricName": "tma_info_memory_l1mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
@@ -844,13 +854,19 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
"MetricName": "tma_info_memory_l3_cache_access_bw"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
@@ -925,12 +941,24 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
@@ -952,13 +980,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
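The two hunks above swap the definitions: tma_info_system_cpus_utilized now carries the raw CPU_CLK_UNHALTED.REF_TSC / TSC ratio (the average number of busy CPUs), and tma_info_system_cpu_utilization is derived from it by dividing by the number of online CPUs. A small sketch of the relationship, with the counter readings made up for illustration:

def cpus_utilized(ref_tsc, tsc):
    # CPU_CLK_UNHALTED.REF_TSC / TSC: average number of CPUs kept busy.
    return ref_tsc / tsc

def cpu_utilization(ref_tsc, tsc, num_cpus_online):
    # Busy CPUs divided by online CPUs, as in the updated MetricExpr above.
    return cpus_utilized(ref_tsc, tsc) / num_cpus_online

# Illustrative readings: 6.4 CPUs' worth of busy reference cycles on an 8-CPU system.
print(cpu_utilization(ref_tsc=6.4e10, tsc=1.0e10, num_cpus_online=8))   # 0.8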
@@ -1068,7 +1096,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1077,7 +1105,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -1093,10 +1121,19 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -1114,7 +1151,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricExpr": "6.5 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -1126,7 +1163,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
@@ -1171,14 +1208,14 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -1188,7 +1225,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1197,7 +1234,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -1224,6 +1261,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
@@ -1234,7 +1272,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -1270,7 +1308,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
"MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
@@ -1279,7 +1317,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -1297,7 +1335,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1305,7 +1343,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1393,7 +1431,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
+ "MetricExpr": "EXE_ACTIVITY.EXE_BOUND_0_PORTS / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -1421,7 +1459,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -1429,7 +1467,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -1439,7 +1477,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -1467,7 +1505,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1495,7 +1533,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -1528,7 +1566,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json b/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json
index b4e061477c1a..46ba98ab3ba4 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
"PerPkg": "1",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
"PerPkg": "1",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
"PerPkg": "1",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
"PerPkg": "1",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
"PerPkg": "1",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
"PerPkg": "1",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
"PerPkg": "1",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
"PerPkg": "1",
@@ -73,6 +81,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
"PerPkg": "1",
@@ -82,6 +91,7 @@
},
{
"BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
+ "Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
"PerPkg": "1",
@@ -91,6 +101,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
"PerPkg": "1",
@@ -99,6 +110,7 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
"PerPkg": "1",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
"PerPkg": "1",
@@ -115,10 +128,20 @@
},
{
"BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
"PerPkg": "1",
"UMask": "0x41",
"Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Unit": "cbox_0"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json
index fe7e19717371..6b9fbed5847c 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of all Core entries outstanding for the memory controller. The outstanding interval starts after LLC miss till return of first data chunk. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of Core Data Read entries outstanding for the memory controller. The outstanding interval starts after LLC miss till return of first data chunk.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.DATA_READ",
"PerPkg": "1",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "UNC_ARB_TRK_REQUESTS.ALL",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of Core coherent Data Read requests sent to memory controller whose data is returned directly to requesting agent.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.DATA_READ",
"PerPkg": "1",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Number of Core coherent Data Read requests sent to memory controller whose data is returned directly to requesting agent.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
"PerPkg": "1",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json
deleted file mode 100644
index 58be90d7cc93..000000000000
--- a/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json
+++ /dev/null
@@ -1,10 +0,0 @@
-[
- {
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
- "EventCode": "0xff",
- "EventName": "UNC_CLOCK.SOCKET",
- "PerPkg": "1",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Unit": "CLOCK"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
index 73feadaf7674..ad33fff57c03 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -82,6 +92,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
@@ -155,6 +174,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -162,6 +182,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -171,6 +192,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -179,6 +201,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -187,6 +210,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -195,6 +219,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -203,6 +228,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -211,6 +237,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -219,6 +246,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index 14229f4b29d8..2ce070629c52 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "L1D miss outstandings duration in cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -35,6 +39,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
@@ -59,6 +66,7 @@
},
{
"BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.SILENT",
"SampleAfterValue": "200003",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.USELESS_HWPF",
"SampleAfterValue": "200003",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.USELESS_PREF",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PublicDescription": "Demand requests that miss L2 cache.",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PublicDescription": "Demand requests to L2 cache.",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_PF",
"PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -137,6 +154,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -145,6 +163,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
@@ -153,6 +172,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
@@ -161,6 +181,7 @@
},
{
"BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "All requests that miss L2 cache.",
@@ -169,6 +190,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_HIT",
"PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
@@ -177,6 +199,7 @@
},
{
"BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PF_MISS",
"PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
@@ -185,6 +208,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "All L2 requests.",
@@ -193,6 +217,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -201,6 +226,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -209,6 +235,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "Counts L2 writebacks that access L2 cache.",
@@ -217,6 +244,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
"Errata": "SKL057",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
@@ -226,6 +254,7 @@
},
{
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
"Errata": "SKL057",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
@@ -235,6 +264,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -245,6 +275,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -255,6 +286,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -265,6 +297,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -274,6 +307,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -284,6 +318,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -304,6 +340,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -314,6 +351,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
@@ -324,6 +362,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
@@ -334,6 +373,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -343,6 +383,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -353,6 +394,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
@@ -363,6 +405,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
@@ -372,6 +415,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
@@ -382,6 +426,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources was remote HITM",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
@@ -392,6 +437,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -401,6 +447,7 @@
},
{
"BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -411,6 +458,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -421,6 +469,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -431,6 +480,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -441,6 +491,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -451,6 +502,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -461,6 +513,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -471,6 +524,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -479,6 +533,7 @@
},
{
"BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
@@ -487,6 +542,7 @@
},
{
"BriefDescription": "Cacheable and non-cacheable code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
@@ -495,6 +551,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -503,6 +560,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -511,6 +569,7 @@
},
{
"BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
"PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
@@ -519,6 +578,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
@@ -527,6 +587,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -536,6 +597,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
@@ -545,6 +607,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -554,6 +617,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -563,6 +627,7 @@
},
{
"BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
@@ -571,6 +636,7 @@
},
{
"BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
@@ -579,6 +645,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
@@ -587,6 +654,7 @@
},
{
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
@@ -595,6 +663,7 @@
},
{
"BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
@@ -603,6 +672,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -612,6 +682,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -621,6 +692,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -630,6 +702,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -639,6 +712,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -648,6 +722,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -657,6 +732,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -666,6 +742,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -675,6 +752,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -684,6 +762,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -693,6 +772,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -702,6 +782,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -711,6 +792,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -720,6 +802,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -729,6 +812,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -738,6 +822,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -747,6 +832,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -756,6 +842,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -765,6 +852,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD hit in the L3 and the snoop to one of the sibling cores hits the line in E/S/F state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -774,6 +862,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -783,6 +872,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -792,6 +882,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -801,6 +892,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -810,6 +902,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -819,6 +912,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -828,6 +922,7 @@
},
{
"BriefDescription": "Counts all demand code reads that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -837,6 +932,7 @@
},
{
"BriefDescription": "Counts all demand code reads that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -846,6 +942,7 @@
},
{
"BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -855,6 +952,7 @@
},
{
"BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -864,6 +962,7 @@
},
{
"BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -873,6 +972,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -882,6 +982,7 @@
},
{
"BriefDescription": "Counts demand data reads that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -891,6 +992,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -900,6 +1002,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -909,6 +1012,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -918,6 +1022,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -927,6 +1032,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -936,6 +1042,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -945,6 +1052,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -954,6 +1062,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -963,6 +1072,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -972,6 +1082,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -981,6 +1092,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -990,6 +1102,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -999,6 +1112,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1008,6 +1122,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1017,6 +1132,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1026,6 +1142,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1035,6 +1152,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1044,6 +1162,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1053,6 +1172,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1062,6 +1182,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1071,6 +1192,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1080,6 +1202,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1089,6 +1212,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1098,6 +1222,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1107,6 +1232,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1116,6 +1242,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1125,6 +1252,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1134,6 +1262,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1143,6 +1272,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1152,6 +1282,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1161,6 +1292,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1170,6 +1302,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1179,6 +1312,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1188,6 +1322,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1197,6 +1332,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1206,6 +1342,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1215,6 +1352,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -1224,6 +1362,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1233,6 +1372,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1242,6 +1382,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -1251,6 +1392,7 @@
},
{
"BriefDescription": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1260,6 +1402,7 @@
},
{
"BriefDescription": "Number of cache line split locks sent to uncore.",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
@@ -1267,7 +1410,16 @@
"UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"SampleAfterValue": "2000003",
@@ -1275,6 +1427,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"SampleAfterValue": "2000003",
@@ -1282,6 +1435,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T0",
"SampleAfterValue": "2000003",
@@ -1289,6 +1443,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/counter.json b/tools/perf/pmu-events/arch/x86/skylakex/counter.json
new file mode 100644
index 000000000000..e94b76404856
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylakex/counter.json
@@ -0,0 +1,52 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2M",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M3UPI",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "3"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
index 384b3c551a1f..25a864613c7d 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instruction retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Counts once for most SIMD scalar computational single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "2000003",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index d6f543471b24..0e1dedce00f2 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
"PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses. Note: Invoking MITE requires two or three cycles delay.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -66,6 +73,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -76,6 +84,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -86,6 +95,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -97,6 +107,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -107,6 +118,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -118,6 +130,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -128,6 +141,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -138,6 +152,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -149,6 +164,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
@@ -159,6 +175,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
@@ -169,6 +186,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -180,6 +198,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -190,6 +209,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -200,6 +220,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -210,6 +231,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -221,6 +243,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -232,6 +255,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
@@ -240,6 +264,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_HIT",
"SampleAfterValue": "200003",
@@ -247,6 +272,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_MISS",
"SampleAfterValue": "200003",
@@ -254,6 +280,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"SampleAfterValue": "200003",
@@ -261,6 +288,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"SampleAfterValue": "200003",
@@ -268,6 +296,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.DSB_CYCLES_OK]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -277,6 +306,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.DSB_CYCLES_ANY]",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -286,6 +316,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -295,6 +326,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -304,6 +336,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -313,6 +346,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -322,6 +356,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 or more Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
@@ -331,6 +366,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
@@ -339,6 +375,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -348,6 +385,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -356,6 +394,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -365,6 +404,7 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -374,6 +414,7 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
@@ -382,6 +423,7 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -392,6 +434,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
@@ -400,6 +443,7 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
@@ -408,6 +452,7 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -417,6 +462,7 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -426,6 +472,7 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -435,6 +482,7 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -444,6 +492,7 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
index dba3cd6b3690..9ee7a9d44fd2 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED",
"PEBS": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_EVENTS",
"SampleAfterValue": "2000003",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_MEM",
"SampleAfterValue": "2000003",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_TIMER",
"SampleAfterValue": "2000003",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"SampleAfterValue": "2000003",
@@ -62,6 +70,7 @@
},
{
"BriefDescription": "Number of times an HLE execution successfully committed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.COMMIT",
"PublicDescription": "Number of times HLE commit succeeded.",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "HLE_RETIRED.START",
"PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
@@ -78,6 +88,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
"Errata": "SKL089",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
@@ -87,6 +98,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -111,6 +124,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -123,6 +137,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -135,6 +150,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -147,6 +163,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -159,6 +176,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -171,6 +189,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -183,6 +202,7 @@
},
{
"BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"PublicDescription": "Demand Data Read requests who miss L3 cache.",
@@ -191,6 +211,7 @@
},
{
"BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
@@ -199,6 +220,7 @@
},
{
"BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
@@ -206,6 +228,7 @@
},
{
"BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
@@ -214,6 +237,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -223,6 +247,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -232,6 +257,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -241,6 +267,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -250,6 +277,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -259,6 +287,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -268,6 +297,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -277,6 +307,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -286,6 +317,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -295,6 +327,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -304,6 +337,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -313,6 +347,7 @@
},
{
"BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -322,6 +357,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -331,6 +367,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -340,6 +377,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -349,6 +387,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -358,6 +397,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -367,6 +407,7 @@
},
{
"BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -376,6 +417,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -385,6 +427,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -394,6 +437,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -403,6 +447,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -412,6 +457,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -421,6 +467,7 @@
},
{
"BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -430,6 +477,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -439,6 +487,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -448,6 +497,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -457,6 +507,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -466,6 +517,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -475,6 +527,7 @@
},
{
"BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -484,6 +537,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -493,6 +547,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -502,6 +557,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -511,6 +567,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -520,6 +577,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -529,6 +587,7 @@
},
{
"BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -538,6 +597,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -547,6 +607,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -556,6 +617,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -565,6 +627,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -574,6 +637,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -583,6 +647,7 @@
},
{
"BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -592,6 +657,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -601,6 +667,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -610,6 +677,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -619,6 +687,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -628,6 +697,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -637,6 +707,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -646,6 +717,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -655,6 +727,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -664,6 +737,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -673,6 +747,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -682,6 +757,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -691,6 +767,7 @@
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -700,6 +777,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -709,6 +787,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -718,6 +797,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -727,6 +807,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -736,6 +817,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -745,6 +827,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -754,6 +837,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -763,6 +847,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -772,6 +857,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -781,6 +867,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -790,6 +877,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -799,6 +887,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -808,6 +897,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
@@ -817,6 +907,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -826,6 +917,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
@@ -835,6 +927,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -844,6 +937,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -853,6 +947,7 @@
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -862,6 +957,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED",
"PEBS": "2",
@@ -871,6 +967,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -879,6 +976,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -887,6 +985,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
@@ -895,6 +994,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_TIMER",
"SampleAfterValue": "2000003",
@@ -902,6 +1002,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -910,6 +1011,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Number of times RTM commit succeeded.",
@@ -918,6 +1020,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
@@ -926,6 +1029,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
@@ -933,6 +1037,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -941,6 +1046,7 @@
},
{
"BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -949,6 +1055,7 @@
},
{
"BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC4",
"PublicDescription": "RTM region detected inside HLE.",
@@ -957,6 +1064,7 @@
},
{
"BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "Counter": "0,1,2,3",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC5",
"PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
@@ -965,6 +1073,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY",
"SampleAfterValue": "2000003",
@@ -972,6 +1081,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Number of times a TSX line had a cache conflict.",
@@ -980,6 +1090,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
"PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
@@ -988,6 +1099,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
@@ -996,6 +1108,7 @@
},
{
"BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
"PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
@@ -1004,6 +1117,7 @@
},
{
"BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
"PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
@@ -1012,6 +1126,7 @@
},
{
"BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
"PublicDescription": "Number of times we could not allocate Lock Buffer.",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json b/tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json
index 904d299c95a3..cccfcab3425e 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/metricgroups.json
@@ -5,7 +5,20 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/other.json b/tools/perf/pmu-events/arch/x86/skylakex/other.json
index 2511d722327a..44c820518e12 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Core cycles the core was throttled due to a pending power level request.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.THROTTLE",
"PublicDescription": "Core cycles the out-of-order engine was throttled due to a pending power level request.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
"SampleAfterValue": "2000003",
@@ -40,6 +45,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
"SampleAfterValue": "2000003",
@@ -47,6 +53,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
"SampleAfterValue": "2000003",
@@ -54,6 +61,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
"SampleAfterValue": "2000003",
@@ -61,6 +69,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
"SampleAfterValue": "2000003",
@@ -68,6 +77,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
"SampleAfterValue": "2000003",
@@ -75,6 +85,7 @@
},
{
"BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0xEF",
"EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
"SampleAfterValue": "2000003",
@@ -82,6 +93,7 @@
},
{
"BriefDescription": "Number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "HW_INTERRUPTS.RECEIVED",
"PublicDescription": "Counts the number of hardware interruptions received by the processor.",
@@ -90,6 +102,7 @@
},
{
"BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
+ "Counter": "0,1,2,3",
"EventCode": "0xFE",
"EventName": "IDI_MISC.WB_DOWNGRADE",
"PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
@@ -98,6 +111,7 @@
},
{
"BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
+ "Counter": "0,1,2,3",
"EventCode": "0xFE",
"EventName": "IDI_MISC.WB_UPGRADE",
"PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
@@ -106,6 +120,7 @@
},
{
"BriefDescription": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
+ "Counter": "0,1,2,3",
"EventCode": "0x09",
"EventName": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
"SampleAfterValue": "2000003",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index c50ddf5b40dd..3dd296ab4d78 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_ACTIVE",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired. [This event is alias to BR_INST_RETIRED.CONDITIONAL]",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.COND",
@@ -36,6 +40,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired. [This event is alias to BR_INST_RETIRED.COND]",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
@@ -65,6 +72,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
@@ -75,6 +83,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
@@ -85,6 +94,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
@@ -95,6 +105,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
@@ -104,6 +115,7 @@
},
{
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
@@ -112,6 +124,7 @@
},
{
"BriefDescription": "Speculative mispredicted indirect branches",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT",
"PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
@@ -120,6 +133,7 @@
},
{
"BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
@@ -127,6 +141,7 @@
},
{
"BriefDescription": "Mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"PEBS": "2",
@@ -136,6 +151,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -145,6 +161,7 @@
},
{
"BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -154,6 +171,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -162,6 +180,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -171,6 +190,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "25003",
@@ -178,6 +198,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"SampleAfterValue": "25003",
@@ -186,6 +207,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "25003",
@@ -193,6 +215,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "25003",
@@ -200,6 +223,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -207,6 +231,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "25003",
@@ -215,6 +240,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "25003",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x3C",
@@ -231,6 +258,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -239,12 +267,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -253,12 +283,14 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003"
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -267,6 +299,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -275,6 +308,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -283,6 +317,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -291,6 +326,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -299,6 +335,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
"CounterMask": "20",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -307,6 +344,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -315,6 +353,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -323,6 +362,7 @@
},
{
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -331,6 +371,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -339,6 +380,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -347,6 +389,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"SampleAfterValue": "2000003",
@@ -354,6 +397,7 @@
},
{
"BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -362,6 +406,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
@@ -370,6 +415,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -378,6 +424,7 @@
},
{
"BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
@@ -385,6 +432,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
@@ -393,15 +441,17 @@
},
{
"BriefDescription": "Number of all retired NOP instructions.",
+ "Counter": "0,1,2,3",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.NOP",
- "PEBS": "2",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
"BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.PREC_DIST",
@@ -412,6 +462,7 @@
},
{
"BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "Counter": "0,2,3",
"CounterMask": "10",
"Errata": "SKL091, SKL044",
"EventCode": "0xC0",
@@ -424,6 +475,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0D",
@@ -434,6 +486,7 @@
},
{
"BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"SampleAfterValue": "2000003",
@@ -441,6 +494,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -450,6 +504,7 @@
{
"AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
"EventCode": "0x0D",
"EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
@@ -457,6 +512,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -465,6 +521,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -473,6 +530,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
@@ -481,6 +539,7 @@
},
{
"BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -489,6 +548,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_OK]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
@@ -498,6 +558,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -507,6 +568,7 @@
},
{
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_4_UOPS]",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_OK",
@@ -516,6 +578,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
"PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -524,6 +587,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xC3",
@@ -533,6 +597,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -541,6 +606,7 @@
},
{
"BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.ANY",
"SampleAfterValue": "100003",
@@ -548,6 +614,7 @@
},
{
"BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
"PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
@@ -556,6 +623,7 @@
},
{
"BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "Counts resource-related stall cycles.",
@@ -564,6 +632,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -572,6 +641,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
@@ -580,6 +650,7 @@
},
{
"BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "ROB_MISC_EVENTS.PAUSE_INST",
"SampleAfterValue": "2000003",
@@ -587,6 +658,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
@@ -595,6 +667,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5E",
@@ -606,6 +679,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
@@ -614,6 +688,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
@@ -622,6 +697,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
@@ -630,6 +706,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
@@ -638,6 +715,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
@@ -646,6 +724,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
@@ -654,6 +733,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
@@ -662,6 +742,7 @@
},
{
"BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
@@ -670,6 +751,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Number of uops executed from any thread.",
@@ -678,6 +760,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -686,6 +769,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -694,6 +778,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -702,6 +787,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -710,6 +796,7 @@
},
{
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
@@ -719,6 +806,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
@@ -728,6 +816,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
@@ -737,6 +826,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
@@ -746,6 +836,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
@@ -755,6 +846,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -765,6 +857,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.THREAD",
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
@@ -773,6 +866,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -781,6 +875,7 @@
},
{
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -789,6 +884,7 @@
},
{
"BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.SLOW_LEA",
"SampleAfterValue": "2000003",
@@ -796,6 +892,7 @@
},
{
"BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -806,6 +903,7 @@
},
{
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0E",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
@@ -814,6 +912,7 @@
},
{
"BriefDescription": "Number of macro-fused uops retired. (non precise)",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
"PublicDescription": "Counts the number of macro-fused uops retired. (non precise)",
@@ -822,6 +921,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PublicDescription": "Counts the retirement slots used.",
@@ -830,6 +930,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -840,6 +941,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 8126f952a30c..e5e86892d7bb 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -68,7 +68,7 @@
},
{
"BriefDescription": "Percentage of time spent in the active CPU power state C0",
- "MetricExpr": "tma_info_system_cpu_utilization",
+ "MetricExpr": "tma_info_system_cpus_utilized",
"MetricName": "cpu_utilization",
"ScaleUnit": "100%"
},
@@ -163,7 +163,7 @@
},
{
"BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
- "MetricExpr": "cha@UNC_CHA_TOR_INSERTS.IA_MISS\\,config1\\=0x12CC0233@ / INST_RETIRED.ANY",
+ "MetricExpr": "cha@UNC_CHA_TOR_INSERTS.IA_MISS\\,config1\\=0x12cc0233@ / INST_RETIRED.ANY",
"MetricName": "llc_code_read_mpi_demand_plus_prefetch",
"ScaleUnit": "1per_instr"
},
@@ -187,7 +187,7 @@
},
{
"BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
- "MetricExpr": "cha@UNC_CHA_TOR_INSERTS.IA_MISS\\,config1\\=0x12D40433@ / INST_RETIRED.ANY",
+ "MetricExpr": "cha@UNC_CHA_TOR_INSERTS.IA_MISS\\,config1\\=0x12d40433@ / INST_RETIRED.ANY",
"MetricName": "llc_data_read_mpi_demand_plus_prefetch",
"ScaleUnit": "1per_instr"
},
@@ -310,7 +310,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
@@ -319,7 +319,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"MetricExpr": "1 - tma_frontend_bound - (UOPS_ISSUED.ANY + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
@@ -340,7 +340,7 @@
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -378,7 +378,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 44 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -399,7 +399,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -417,7 +417,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -448,14 +448,14 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -464,7 +464,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -474,7 +474,7 @@
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(110 * tma_info_system_core_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM + OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_system_core_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -484,7 +484,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -497,7 +497,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -512,6 +512,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
"MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
"MetricName": "tma_few_uops_instructions",
@@ -540,7 +541,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -587,7 +588,7 @@
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
- "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
@@ -597,7 +598,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
@@ -616,7 +617,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -651,24 +652,6 @@
},
{
"BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
- "MetricExpr": "(100 * (1 - tma_core_bound / (((EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if tma_core_bound < (((EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / CPU_CLK_UNHALTED.THREAD * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / CPU_CLK_UNHALTED.THREAD * CPU_CLK_UNHALTED.THREAD + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
- "MetricGroup": "Cor;SMT",
- "MetricName": "tma_info_botlnk_core_bound_likely"
- },
- {
- "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "100 * (100 * (tma_fetch_latency * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(2 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb)))",
- "MetricGroup": "DSBmiss;Fed",
- "MetricName": "tma_info_botlnk_dsb_misses"
- },
- {
- "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.",
- "MetricExpr": "100 * (100 * (tma_fetch_latency * ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD) / ((ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / CPU_CLK_UNHALTED.THREAD + ICACHE_TAG.STALLS / CPU_CLK_UNHALTED.THREAD + (INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD + 9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) + min(2 * IDQ.MS_SWITCHES / CPU_CLK_UNHALTED.THREAD, 1) + DECODE.LCP / CPU_CLK_UNHALTED.THREAD + DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD)))",
- "MetricGroup": "Fed;FetchLat;IcMiss",
- "MetricName": "tma_info_botlnk_ic_misses"
- },
- {
- "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
"MetricGroup": "Cor;SMT",
@@ -676,13 +659,21 @@
"MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
@@ -693,39 +684,33 @@
"PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
- "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5"
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
@@ -733,23 +718,23 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
@@ -757,8 +742,8 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
@@ -766,7 +751,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
@@ -775,18 +760,25 @@
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -840,7 +832,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -857,7 +849,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 4 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
@@ -918,7 +910,7 @@
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -1008,18 +1000,12 @@
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 9",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
- },
- {
- "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_code_stlb_mpki",
- "MetricGroup": "Fed;MemoryTLB",
- "MetricName": "tma_info_memory_code_stlb_mpki"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -1058,30 +1044,18 @@
"MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t"
},
{
- "BriefDescription": "Average Parallel L2 cache miss data reads",
- "MetricExpr": "tma_info_memory_latency_data_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_data_l2_mlp"
- },
- {
"BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
"MetricName": "tma_info_memory_fb_hpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l1d_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -1094,30 +1068,12 @@
"MetricName": "tma_info_memory_l1mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l2_cache_fill_bw_2t"
- },
- {
- "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
- "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
- "MetricGroup": "L2Evicts;Mem;Server",
- "MetricName": "tma_info_memory_l2_evictions_nonsilent_pki"
- },
- {
- "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
- "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
- "MetricGroup": "L2Evicts;Mem;Server",
- "MetricName": "tma_info_memory_l2_evictions_silent_pki"
- },
- {
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
"MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
"MetricGroup": "CacheHits;Mem",
@@ -1148,30 +1104,24 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "tma_info_memory_l3_cache_access_bw"
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * OFFCORE_REQUESTS.DEMAND_RFO / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
},
{
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / (duration_time * 1e3 / 1e3)",
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "tma_info_memory_l3_cache_access_bw_2t"
+ "MetricName": "tma_info_memory_l3_cache_access_bw"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / (duration_time * 1e3 / 1e3)",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "tma_info_memory_l3_cache_fill_bw_2t"
- },
- {
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem",
@@ -1185,27 +1135,15 @@
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_miss_latency",
- "MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
- },
- {
- "BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "tma_info_memory_load_l2_mlp",
- "MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_latency_load_l2_mlp"
- },
- {
- "BriefDescription": "Average Latency for L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
- "MetricName": "tma_info_memory_load_l2_miss_latency"
+ "MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"MetricGroup": "Memory_BW;Offcore",
- "MetricName": "tma_info_memory_load_l2_mlp"
+ "MetricName": "tma_info_memory_latency_load_l2_mlp"
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
@@ -1214,14 +1152,8 @@
"MetricName": "tma_info_memory_load_miss_real_latency"
},
{
- "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_load_stlb_mpki",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_load_stlb_mpki"
- },
- {
"BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "tma_info_memory_uc_load_pki",
+ "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
"MetricGroup": "Mem",
"MetricName": "tma_info_memory_mix_uc_load_pki"
},
@@ -1233,18 +1165,6 @@
"PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "tma_info_memory_tlb_page_walks_utilization",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_page_walks_utilization"
- },
- {
- "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
- "MetricExpr": "tma_info_memory_tlb_store_stlb_mpki",
- "MetricGroup": "Mem;MemoryTLB",
- "MetricName": "tma_info_memory_store_stlb_mpki"
- },
- {
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
"MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
"MetricGroup": "Fed;MemoryTLB",
@@ -1271,18 +1191,24 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Un-cacheable retired load per kilo instruction",
- "MetricExpr": "1e3 * MEM_LOAD_MISC_RETIRED.UC / INST_RETIRED.ANY",
- "MetricGroup": "Mem",
- "MetricName": "tma_info_memory_uc_load_pki"
- },
- {
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
@@ -1304,13 +1230,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1470,7 +1396,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1479,7 +1405,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -1495,10 +1421,19 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -1516,7 +1451,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricExpr": "17 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -1528,7 +1463,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
@@ -1573,7 +1508,7 @@
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
"MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
@@ -1582,14 +1517,14 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -1599,7 +1534,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1608,7 +1543,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -1635,6 +1570,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
@@ -1645,7 +1581,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -1681,7 +1617,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
"MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
@@ -1690,7 +1626,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -1708,7 +1644,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1716,7 +1652,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1804,7 +1740,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
+ "MetricExpr": "EXE_ACTIVITY.EXE_BOUND_0_PORTS / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -1832,7 +1768,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
@@ -1859,7 +1795,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
- "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
@@ -1869,7 +1805,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -1897,7 +1833,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1925,7 +1861,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 11 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -1958,7 +1894,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json
index 543dfc1e5ad7..da46a3aeb58c 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -10,8 +12,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -19,8 +23,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -28,8 +34,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -37,8 +45,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -46,8 +56,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -55,8 +67,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -64,8 +78,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -73,8 +89,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -82,8 +100,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -91,8 +111,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -100,8 +122,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -109,8 +133,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -118,8 +144,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -127,8 +155,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -136,8 +166,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -145,8 +177,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -154,8 +188,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -163,8 +199,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -172,8 +210,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -181,8 +221,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -190,8 +232,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -199,8 +243,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -208,8 +254,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -217,8 +265,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -226,8 +276,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -235,8 +287,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -244,8 +298,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -253,8 +309,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -262,8 +320,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -271,8 +331,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -280,8 +342,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -289,8 +353,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -298,8 +364,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -307,8 +375,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -316,8 +386,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -325,8 +397,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -334,8 +408,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -343,8 +419,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -352,8 +430,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -361,8 +441,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -370,8 +452,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -379,8 +463,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -388,8 +474,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -397,8 +485,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -406,8 +496,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -415,8 +507,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -424,8 +518,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -433,8 +529,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass; Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the intermediate bypass.",
"UMask": "0x2",
@@ -442,8 +540,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
"UMask": "0x4",
@@ -451,8 +551,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the full bypass.",
"UMask": "0x1",
@@ -460,6 +562,7 @@
},
{
"BriefDescription": "Clockticks of the uncore caching & home agent (CHA)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_CHA_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts clockticks of the clock controlling the uncore caching and home agent (CHA).",
@@ -467,55 +570,69 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; C1 State",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.C1_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; C1 Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; C6 State",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.C6_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; C6 Transition",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Core PMA Events; GV",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_CHA_CORE_PMA.GV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Core Cross Snoops Issued; Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xe2",
@@ -523,8 +640,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Any Single Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xe1",
@@ -532,8 +651,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Any Snoop to Remote Node",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xe4",
@@ -541,6 +662,7 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
"PerPkg": "1",
@@ -550,8 +672,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Single Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x41",
@@ -559,8 +683,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Core Request to Remote Node",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x44",
@@ -568,6 +694,7 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
"PerPkg": "1",
@@ -577,8 +704,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Single Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x81",
@@ -586,8 +715,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Eviction to Remote Node",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x84",
@@ -595,8 +726,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Multiple External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x22",
@@ -604,8 +737,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; Single External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x21",
@@ -613,8 +748,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued; External Snoop to Remote Node",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x24",
@@ -622,14 +759,17 @@
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
"Unit": "CHA"
},
{
"BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
"PerPkg": "1",
@@ -639,6 +779,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_CHA_DIR_LOOKUP.SNP",
"PerPkg": "1",
@@ -648,6 +789,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.HA",
"PerPkg": "1",
@@ -657,6 +799,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_CHA_DIR_UPDATE.TOR",
"PerPkg": "1",
@@ -666,8 +809,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -675,8 +820,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -684,6 +831,7 @@
},
{
"BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
"PerPkg": "1",
@@ -693,8 +841,10 @@
},
{
"BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_FAST_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x1",
@@ -702,6 +852,7 @@
},
{
"BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.EX_RDS",
"PerPkg": "1",
@@ -711,80 +862,100 @@
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache; No SF/LLC HitS/F and op is RdInvOwn",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of Misses in HitMe Cache; SF/LLC HitS/F and op is RdInvOwn",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Deallocate HitME$ on Reads without RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Received RspFwdI* for a local request, but converted HitME$ to SF entry",
"UMask": "0x1",
@@ -792,16 +963,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
"UMask": "0x2",
@@ -809,16 +984,20 @@
},
{
"BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache to SHARed",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -826,8 +1005,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -835,8 +1016,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -844,8 +1027,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -853,8 +1038,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -862,8 +1049,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -871,8 +1060,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -880,8 +1071,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -889,8 +1082,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -898,8 +1093,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -907,8 +1104,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -916,8 +1115,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -925,8 +1126,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -934,8 +1137,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -943,6 +1148,7 @@
},
{
"BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
"PerPkg": "1",
@@ -952,8 +1158,10 @@
},
{
"BriefDescription": "HA to iMC Reads Issued; ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
"UMask": "0x2",
@@ -961,6 +1169,7 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
"PerPkg": "1",
@@ -970,8 +1179,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; Full Line MIG",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
"UMask": "0x10",
@@ -979,8 +1190,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
"UMask": "0x4",
@@ -988,8 +1201,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
"UMask": "0x2",
@@ -997,8 +1212,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; Partial MIG",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.; Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -1006,8 +1223,10 @@
},
{
"BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
"UMask": "0x8",
@@ -1015,64 +1234,80 @@
},
{
"BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_CHA_IODC_ALLOC.INVITOM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations dropped due to IODC Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_CHA_IODC_ALLOC.IODCFULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IDOC allocation dropped due to OSB gate",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_CHA_IODC_ALLOC.OSBGATED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to any reason",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to conflicting transaction",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.SNPOUT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoE",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbPushMtoI",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Moved to Cbo section",
"UMask": "0x4",
@@ -1080,8 +1315,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x11",
@@ -1089,8 +1326,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Read transactions",
"UMask": "0x3",
@@ -1098,8 +1337,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
"UMask": "0x31",
@@ -1107,8 +1348,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
"UMask": "0x91",
@@ -1116,8 +1359,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
"UMask": "0x9",
@@ -1125,8 +1370,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Write Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
"UMask": "0x5",
@@ -1134,35 +1381,43 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.F_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized; Local - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2f",
@@ -1170,8 +1425,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - Lines in E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x22",
@@ -1179,8 +1436,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - Lines in F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x28",
@@ -1188,8 +1447,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - Lines in M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x21",
@@ -1197,8 +1458,10 @@
},
{
"BriefDescription": "Lines Victimized; Local - Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x24",
@@ -1206,26 +1469,32 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized; Remote - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x8f",
@@ -1233,8 +1502,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - Lines in E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x82",
@@ -1242,8 +1513,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - Lines in F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x88",
@@ -1251,8 +1524,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - Lines in M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x81",
@@ -1260,8 +1535,10 @@
},
{
"BriefDescription": "Lines Victimized; Remote - Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x84",
@@ -1269,15 +1546,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
"PerPkg": "1",
@@ -1287,6 +1567,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
"PerPkg": "1",
@@ -1296,6 +1577,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
"PerPkg": "1",
@@ -1305,6 +1587,7 @@
},
{
"BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
"PerPkg": "1",
@@ -1314,8 +1597,10 @@
},
{
"BriefDescription": "Cbo Misc; CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous events in the Cbo.",
"UMask": "0x20",
@@ -1323,8 +1608,10 @@
},
{
"BriefDescription": "Cbo Misc; CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous events in the Cbo.",
"UMask": "0x10",
@@ -1332,6 +1619,7 @@
},
{
"BriefDescription": "Number of times that an RFO hit in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RFO_HIT_S",
"PerPkg": "1",
@@ -1341,8 +1629,10 @@
},
{
"BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
"UMask": "0x1",
@@ -1350,8 +1640,10 @@
},
{
"BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
"UMask": "0x2",
@@ -1359,16 +1651,20 @@
},
{
"BriefDescription": "OSB Snoop Broadcast",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_CHA_OSB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC0_SMI2",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -1376,8 +1672,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC1_SMI3",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -1385,8 +1683,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC2_SMI4",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -1394,8 +1694,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC3_SMI5",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -1403,8 +1705,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; MC0_SMI0",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -1412,8 +1716,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty; MC1_SMI1",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -1421,6 +1727,7 @@
},
{
"BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
@@ -1430,6 +1737,7 @@
},
{
"BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
@@ -1439,6 +1747,7 @@
},
{
"BriefDescription": "Read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS",
"PerPkg": "1",
@@ -1448,6 +1757,7 @@
},
{
"BriefDescription": "Read requests from a unit on this socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
"PerPkg": "1",
@@ -1457,6 +1767,7 @@
},
{
"BriefDescription": "Read requests from a remote socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
"PerPkg": "1",
@@ -1466,6 +1777,7 @@
},
{
"BriefDescription": "Write requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES",
"PerPkg": "1",
@@ -1475,6 +1787,7 @@
},
{
"BriefDescription": "Write Requests from a unit on this socket",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
@@ -1484,6 +1797,7 @@
},
{
"BriefDescription": "Read and Write Requests; Writes Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
@@ -1493,8 +1807,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -1502,8 +1818,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -1511,8 +1829,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -1520,8 +1840,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -1529,8 +1851,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -1538,8 +1862,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -1547,8 +1873,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -1556,8 +1884,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -1565,87 +1895,109 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Allocations; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x4",
@@ -1653,6 +2005,7 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ",
"PerPkg": "1",
@@ -1662,8 +2015,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x2",
@@ -1671,8 +2026,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x10",
@@ -1680,8 +2037,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x20",
@@ -1689,8 +2048,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; RRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x40",
@@ -1698,8 +2059,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations; WBQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x80",
@@ -1707,238 +2070,297 @@
},
{
"BriefDescription": "Ingress Probe Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress Probe Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
"PerPkg": "1",
@@ -1947,24 +2369,30 @@
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "ISMQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x1",
@@ -1972,8 +2400,10 @@
},
{
"BriefDescription": "ISMQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -1981,8 +2411,10 @@
},
{
"BriefDescription": "ISMQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x40",
@@ -1990,8 +2422,10 @@
},
{
"BriefDescription": "ISMQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x10",
@@ -1999,8 +2433,10 @@
},
{
"BriefDescription": "ISMQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x20",
@@ -2008,8 +2444,10 @@
},
{
"BriefDescription": "ISMQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x4",
@@ -2017,8 +2455,10 @@
},
{
"BriefDescription": "ISMQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x8",
@@ -2026,8 +2466,10 @@
},
{
"BriefDescription": "ISMQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x80",
@@ -2035,8 +2477,10 @@
},
{
"BriefDescription": "ISMQ Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x1",
@@ -2044,8 +2488,10 @@
},
{
"BriefDescription": "ISMQ Retries; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2053,8 +2499,10 @@
},
{
"BriefDescription": "ISMQ Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x40",
@@ -2062,8 +2510,10 @@
},
{
"BriefDescription": "ISMQ Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x10",
@@ -2071,8 +2521,10 @@
},
{
"BriefDescription": "ISMQ Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x20",
@@ -2080,8 +2532,10 @@
},
{
"BriefDescription": "ISMQ Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x4",
@@ -2089,8 +2543,10 @@
},
{
"BriefDescription": "ISMQ Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x8",
@@ -2098,8 +2554,10 @@
},
{
"BriefDescription": "ISMQ Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x80",
@@ -2107,8 +2565,10 @@
},
{
"BriefDescription": "ISMQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x1",
@@ -2116,8 +2576,10 @@
},
{
"BriefDescription": "ISMQ Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2125,8 +2587,10 @@
},
{
"BriefDescription": "ISMQ Retries; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x1",
@@ -2134,8 +2598,10 @@
},
{
"BriefDescription": "ISMQ Retries; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2143,8 +2609,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x4",
@@ -2152,6 +2620,7 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
"PerPkg": "1",
@@ -2161,8 +2630,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy; RRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x40",
@@ -2170,8 +2641,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy; WBQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x80",
@@ -2179,8 +2652,10 @@
},
{
"BriefDescription": "Other Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x1",
@@ -2188,8 +2663,10 @@
},
{
"BriefDescription": "Other Retries; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x2",
@@ -2197,8 +2674,10 @@
},
{
"BriefDescription": "Other Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x40",
@@ -2206,8 +2685,10 @@
},
{
"BriefDescription": "Other Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x10",
@@ -2215,8 +2696,10 @@
},
{
"BriefDescription": "Other Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x20",
@@ -2224,8 +2707,10 @@
},
{
"BriefDescription": "Other Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x4",
@@ -2233,8 +2718,10 @@
},
{
"BriefDescription": "Other Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x8",
@@ -2242,8 +2729,10 @@
},
{
"BriefDescription": "Other Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x80",
@@ -2251,8 +2740,10 @@
},
{
"BriefDescription": "Other Retries; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x40",
@@ -2260,8 +2751,10 @@
},
{
"BriefDescription": "Other Retries; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x1",
@@ -2269,8 +2762,10 @@
},
{
"BriefDescription": "Other Retries; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x2",
@@ -2278,8 +2773,10 @@
},
{
"BriefDescription": "Other Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x20",
@@ -2287,8 +2784,10 @@
},
{
"BriefDescription": "Other Retries; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x4",
@@ -2296,8 +2795,10 @@
},
{
"BriefDescription": "Other Retries; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x80",
@@ -2305,8 +2806,10 @@
},
{
"BriefDescription": "Other Retries; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x8",
@@ -2314,8 +2817,10 @@
},
{
"BriefDescription": "Other Retries; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x10",
@@ -2323,136 +2828,170 @@
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Request Queue Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x1",
@@ -2460,8 +2999,10 @@
},
{
"BriefDescription": "Request Queue Retries; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x2",
@@ -2469,8 +3010,10 @@
},
{
"BriefDescription": "Request Queue Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x40",
@@ -2478,8 +3021,10 @@
},
{
"BriefDescription": "Request Queue Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x10",
@@ -2487,8 +3032,10 @@
},
{
"BriefDescription": "Request Queue Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x20",
@@ -2496,8 +3043,10 @@
},
{
"BriefDescription": "Request Queue Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x4",
@@ -2505,8 +3054,10 @@
},
{
"BriefDescription": "Request Queue Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x8",
@@ -2514,8 +3065,10 @@
},
{
"BriefDescription": "Request Queue Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x80",
@@ -2523,8 +3076,10 @@
},
{
"BriefDescription": "Request Queue Retries; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x40",
@@ -2532,8 +3087,10 @@
},
{
"BriefDescription": "Request Queue Retries; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x1",
@@ -2541,8 +3098,10 @@
},
{
"BriefDescription": "Request Queue Retries; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x2",
@@ -2550,8 +3109,10 @@
},
{
"BriefDescription": "Request Queue Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x20",
@@ -2559,8 +3120,10 @@
},
{
"BriefDescription": "Request Queue Retries; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x4",
@@ -2568,8 +3131,10 @@
},
{
"BriefDescription": "Request Queue Retries; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x80",
@@ -2577,8 +3142,10 @@
},
{
"BriefDescription": "Request Queue Retries; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x8",
@@ -2586,8 +3153,10 @@
},
{
"BriefDescription": "Request Queue Retries; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x10",
@@ -2595,8 +3164,10 @@
},
{
"BriefDescription": "RRQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x1",
@@ -2604,8 +3175,10 @@
},
{
"BriefDescription": "RRQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x2",
@@ -2613,8 +3186,10 @@
},
{
"BriefDescription": "RRQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x40",
@@ -2622,8 +3197,10 @@
},
{
"BriefDescription": "RRQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x10",
@@ -2631,8 +3208,10 @@
},
{
"BriefDescription": "RRQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x20",
@@ -2640,8 +3219,10 @@
},
{
"BriefDescription": "RRQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x4",
@@ -2649,8 +3230,10 @@
},
{
"BriefDescription": "RRQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x8",
@@ -2658,8 +3241,10 @@
},
{
"BriefDescription": "RRQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x80",
@@ -2667,8 +3252,10 @@
},
{
"BriefDescription": "RRQ Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x40",
@@ -2676,8 +3263,10 @@
},
{
"BriefDescription": "RRQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x1",
@@ -2685,8 +3274,10 @@
},
{
"BriefDescription": "RRQ Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x2",
@@ -2694,8 +3285,10 @@
},
{
"BriefDescription": "RRQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x20",
@@ -2703,8 +3296,10 @@
},
{
"BriefDescription": "RRQ Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x4",
@@ -2712,8 +3307,10 @@
},
{
"BriefDescription": "RRQ Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x80",
@@ -2721,8 +3318,10 @@
},
{
"BriefDescription": "RRQ Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x8",
@@ -2730,8 +3329,10 @@
},
{
"BriefDescription": "RRQ Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
"UMask": "0x10",
@@ -2739,8 +3340,10 @@
},
{
"BriefDescription": "WBQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x1",
@@ -2748,8 +3351,10 @@
},
{
"BriefDescription": "WBQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x2",
@@ -2757,8 +3362,10 @@
},
{
"BriefDescription": "WBQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x40",
@@ -2766,8 +3373,10 @@
},
{
"BriefDescription": "WBQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x10",
@@ -2775,8 +3384,10 @@
},
{
"BriefDescription": "WBQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x20",
@@ -2784,8 +3395,10 @@
},
{
"BriefDescription": "WBQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x4",
@@ -2793,8 +3406,10 @@
},
{
"BriefDescription": "WBQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x8",
@@ -2802,8 +3417,10 @@
},
{
"BriefDescription": "WBQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x80",
@@ -2811,8 +3428,10 @@
},
{
"BriefDescription": "WBQ Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x40",
@@ -2820,8 +3439,10 @@
},
{
"BriefDescription": "WBQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x1",
@@ -2829,8 +3450,10 @@
},
{
"BriefDescription": "WBQ Rejects; HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x2",
@@ -2838,8 +3461,10 @@
},
{
"BriefDescription": "WBQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x20",
@@ -2847,8 +3472,10 @@
},
{
"BriefDescription": "WBQ Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x4",
@@ -2856,8 +3483,10 @@
},
{
"BriefDescription": "WBQ Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x80",
@@ -2865,8 +3494,10 @@
},
{
"BriefDescription": "WBQ Rejects; SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x8",
@@ -2874,8 +3505,10 @@
},
{
"BriefDescription": "WBQ Rejects; Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
"UMask": "0x10",
@@ -2883,8 +3516,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -2892,8 +3527,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -2901,8 +3538,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -2910,8 +3549,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -2919,8 +3560,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -2928,8 +3571,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -2937,8 +3582,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -2946,8 +3593,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -2955,8 +3604,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -2964,8 +3615,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_RxR_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -2973,8 +3626,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -2982,8 +3637,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -2991,8 +3648,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -3000,8 +3659,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -3009,8 +3670,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -3018,8 +3681,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -3027,8 +3692,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -3036,8 +3703,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3045,8 +3714,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3054,8 +3725,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3063,8 +3736,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3072,8 +3747,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -3081,8 +3758,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_RxR_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3090,8 +3769,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3099,8 +3780,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3108,8 +3791,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3117,8 +3802,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3126,8 +3813,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -3135,8 +3824,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3144,6 +3835,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.E_STATE",
"PerPkg": "1",
@@ -3153,6 +3845,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.M_STATE",
"PerPkg": "1",
@@ -3162,6 +3855,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.S_STATE",
"PerPkg": "1",
@@ -3171,8 +3865,10 @@
},
{
"BriefDescription": "Snoops Sent; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.",
"UMask": "0x1",
@@ -3180,8 +3876,10 @@
},
{
"BriefDescription": "Snoops Sent; Broadcast snoop for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from local sockets.",
"UMask": "0x10",
@@ -3189,8 +3887,10 @@
},
{
"BriefDescription": "Snoops Sent; Broadcast snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast snoops issued by the HA.This filter includes only requests coming from remote sockets.",
"UMask": "0x20",
@@ -3198,8 +3898,10 @@
},
{
"BriefDescription": "Snoops Sent; Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of directed snoops issued by the HA. This filter includes only requests coming from local sockets.",
"UMask": "0x40",
@@ -3207,8 +3909,10 @@
},
{
"BriefDescription": "Snoops Sent; Directed snoops for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of directed snoops issued by the HA. This filter includes only requests coming from remote sockets.",
"UMask": "0x80",
@@ -3216,8 +3920,10 @@
},
{
"BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the local socket.",
"UMask": "0x4",
@@ -3225,8 +3931,10 @@
},
{
"BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Remote Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the remote socket.",
"UMask": "0x8",
@@ -3234,6 +3942,7 @@
},
{
"BriefDescription": "RspCnflct* Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
"PerPkg": "1",
@@ -3243,8 +3952,10 @@
},
{
"BriefDescription": "Snoop Responses Received; RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -3252,6 +3963,7 @@
},
{
"BriefDescription": "RspI Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPI",
"PerPkg": "1",
@@ -3261,6 +3973,7 @@
},
{
"BriefDescription": "RspIFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
@@ -3270,8 +3983,10 @@
},
{
"BriefDescription": "Snoop Responses Received : RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received : RspS : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -3279,6 +3994,7 @@
},
{
"BriefDescription": "RspSFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
@@ -3288,6 +4004,7 @@
},
{
"BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
@@ -3297,6 +4014,7 @@
},
{
"BriefDescription": "Rsp*WB Snoop Responses Received",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
"PerPkg": "1",
@@ -3306,8 +4024,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -3315,8 +4035,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -3324,8 +4046,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
"UMask": "0x1",
@@ -3333,8 +4057,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
@@ -3342,8 +4068,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -3351,8 +4079,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
"UMask": "0x8",
@@ -3360,8 +4090,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -3369,8 +4101,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -3378,8 +4112,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3387,8 +4123,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3396,8 +4134,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3405,8 +4145,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3414,8 +4156,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3423,8 +4167,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3432,8 +4178,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3441,8 +4189,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3450,8 +4200,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3459,8 +4211,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3468,8 +4222,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3477,8 +4233,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3486,8 +4244,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3495,8 +4255,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3504,8 +4266,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3513,8 +4277,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3522,8 +4288,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3531,8 +4299,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3540,8 +4310,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3549,8 +4321,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3558,8 +4332,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3567,8 +4343,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3576,8 +4354,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3585,8 +4365,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3594,8 +4376,10 @@
},
{
"BriefDescription": "TOR Inserts; Hits from Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x15",
@@ -3603,8 +4387,10 @@
},
{
"BriefDescription": "TOR Inserts; All from Local iA and IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL_IO_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally initiated requests",
"UMask": "0x35",
@@ -3612,8 +4398,10 @@
},
{
"BriefDescription": "TOR Inserts; Misses from Local",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x25",
@@ -3621,8 +4409,10 @@
},
{
"BriefDescription": "TOR Inserts; SF/LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -3630,8 +4420,10 @@
},
{
"BriefDescription": "TOR Inserts; Hit (Not a Miss)",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; HITs (hit is defined to be not a miss [see below], as a result for any request allocated into the TOR, one of either HIT or MISS must be true)",
"UMask": "0x10",
@@ -3639,6 +4431,7 @@
},
{
"BriefDescription": "TOR Inserts; All from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA",
"PerPkg": "1",
@@ -3648,6 +4441,7 @@
},
{
"BriefDescription": "TOR Inserts; Hits from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
"PerPkg": "1",
@@ -3657,6 +4451,7 @@
},
{
"BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
"Filter": "config1=0x40233",
@@ -3667,6 +4462,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
"Filter": "config1=0x40433",
@@ -3677,6 +4473,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
"Filter": "config1=0x4b233",
@@ -3686,6 +4483,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
"Filter": "config1=0x4b433",
@@ -3695,6 +4493,7 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO",
"Filter": "config1=0x4b033",
@@ -3705,6 +4504,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
"Filter": "config1=0x40033",
@@ -3715,6 +4515,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
"PerPkg": "1",
@@ -3724,6 +4525,7 @@
},
{
"BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
"Filter": "config1=0x40233",
@@ -3734,6 +4536,7 @@
},
{
"BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
"Filter": "config1=0x40433",
@@ -3744,6 +4547,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
"Filter": "config1=0x4b233",
@@ -3753,6 +4557,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
"Filter": "config1=0x4b433",
@@ -3762,6 +4567,7 @@
},
{
"BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO",
"Filter": "config1=0x4b033",
@@ -3772,6 +4578,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
"Filter": "config1=0x40033",
@@ -3782,8 +4589,10 @@
},
{
"BriefDescription": "TOR Inserts; All from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally generated IO traffic",
"UMask": "0x34",
@@ -3791,6 +4600,7 @@
},
{
"BriefDescription": "TOR Inserts; Hits from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
"PerPkg": "1",
@@ -3800,6 +4610,7 @@
},
{
"BriefDescription": "TOR Inserts; Misses from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
"PerPkg": "1",
@@ -3809,8 +4620,10 @@
},
{
"BriefDescription": "TOR Inserts; ItoM misses from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "Experimental": "1",
"Filter": "config1=0x49033",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM request is used by IIO to request a data write without first reading the data for ownership.",
@@ -3819,8 +4632,10 @@
},
{
"BriefDescription": "TOR Inserts; RdCur misses from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR",
+ "Experimental": "1",
"Filter": "config1=0x43C33",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RdCur requests and miss the LLC. A RdCur request is used by IIO to read data without changing state.",
@@ -3829,8 +4644,10 @@
},
{
"BriefDescription": "TOR Inserts; RFO misses from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "Experimental": "1",
"Filter": "config1=0x40033",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests a cache line to be cached in E state with the intent to modify.",
@@ -3839,8 +4656,10 @@
},
{
"BriefDescription": "TOR Inserts; IPQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x8",
@@ -3848,26 +4667,32 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x18",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IPQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x1",
@@ -3875,17 +4700,21 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts; Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; Misses. (a miss is defined to be any transaction from the IRQ, PRQ, RRQ, IPQ or (in the victim case) the ISMQ, that required the CHA to spawn a new UPI/SMI3 request on the UPI fabric (including UPI snoops and/or any RD/WR to a local memory controller, in the event that the CHA is the home node)). Basically, if the LLC/SF/MLC complex were not able to service the request without involving another agent...it is a miss. If only IDI snoops were required, it is not a miss (that means the SF/MLC com",
"UMask": "0x20",
@@ -3893,8 +4722,10 @@
},
{
"BriefDescription": "TOR Inserts; PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
"UMask": "0x4",
@@ -3902,6 +4733,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
@@ -3911,44 +4743,54 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x60",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.WBQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.WBQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; All from Local",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All remotely generated requests",
"UMask": "0x37",
@@ -3956,8 +4798,10 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from Local",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x17",
@@ -3965,8 +4809,10 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from Local",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x27",
@@ -3974,8 +4820,10 @@
},
{
"BriefDescription": "TOR Occupancy; SF/LLC Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -3983,8 +4831,10 @@
},
{
"BriefDescription": "TOR Occupancy; Hit (Not a Miss)",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; HITs (hit is defined to be not a miss [see below], as a result for any request allocated into the TOR, one of either HIT or MISS must be true)",
"UMask": "0x10",
@@ -3992,6 +4842,7 @@
},
{
"BriefDescription": "TOR Occupancy; All from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
"PerPkg": "1",
@@ -4001,6 +4852,7 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
"PerPkg": "1",
@@ -4010,6 +4862,7 @@
},
{
"BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
"Filter": "config1=0x40233",
@@ -4020,6 +4873,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
"Filter": "config1=0x40433",
@@ -4030,6 +4884,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
"Filter": "config1=0x4b233",
@@ -4039,6 +4894,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
"Filter": "config1=0x4b433",
@@ -4048,6 +4904,7 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
"Filter": "config1=0x4b033",
@@ -4058,6 +4915,7 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
"Filter": "config1=0x40033",
@@ -4068,6 +4926,7 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
"PerPkg": "1",
@@ -4077,6 +4936,7 @@
},
{
"BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
"Filter": "config1=0x40233",
@@ -4087,6 +4947,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
"Filter": "config1=0x40433",
@@ -4097,6 +4958,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
"Filter": "config1=0x4b233",
@@ -4106,6 +4968,7 @@
},
{
"BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
"Filter": "config1=0x4b433",
@@ -4115,6 +4978,7 @@
},
{
"BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
"Filter": "config1=0x4b033",
@@ -4125,6 +4989,7 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
"Filter": "config1=0x40033",
@@ -4135,8 +5000,10 @@
},
{
"BriefDescription": "TOR Occupancy; All from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; All locally generated IO traffic",
"UMask": "0x34",
@@ -4144,8 +5011,10 @@
},
{
"BriefDescription": "TOR Occupancy; Hits from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x14",
@@ -4153,8 +5022,10 @@
},
{
"BriefDescription": "TOR Occupancy; Misses from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x24",
@@ -4162,8 +5033,10 @@
},
{
"BriefDescription": "TOR Occupancy; ITOM Misses from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "Experimental": "1",
"Filter": "config1=0x49033",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM is used by IIO to request a data write without first reading the data for ownership.",
@@ -4172,8 +5045,10 @@
},
{
"BriefDescription": "TOR Occupancy; RDCUR misses from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR",
+ "Experimental": "1",
"Filter": "config1=0x43C33",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RdCur requests that miss the LLC. A RdCur request is used by IIO to read data without changing state.",
@@ -4182,8 +5057,10 @@
},
{
"BriefDescription": "TOR Occupancy; RFO misses from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Experimental": "1",
"Filter": "config1=0x40033",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests data to be cached in E state with the intent to modify.",
@@ -4192,8 +5069,10 @@
},
{
"BriefDescription": "TOR Occupancy; IPQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x8",
@@ -4201,26 +5080,32 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x18",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; IRQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x1",
@@ -4228,17 +5113,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy; Miss",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; Misses. (a miss is defined to be any transaction from the IRQ, PRQ, RRQ, IPQ or (in the victim case) the ISMQ, that required the CHA to spawn a new UPI/SMI3 request on the UPI fabric (including UPI snoops and/or any RD/WR to a local memory controller, in the event that the CHA is the home node)). Basically, if the LLC/SF/MLC complex were not able to service the request without involving another agent...it is a miss. If only IDI snoops were required, it is not a miss (that means the SF/MLC com",
"UMask": "0x20",
@@ -4246,8 +5135,10 @@
},
{
"BriefDescription": "TOR Occupancy; PRQ",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
"UMask": "0x4",
@@ -4255,8 +5146,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4264,8 +5157,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4273,8 +5168,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4282,8 +5179,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4291,8 +5190,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4300,8 +5201,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4309,8 +5212,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4318,8 +5223,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4327,8 +5234,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4336,8 +5245,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4345,8 +5256,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -4354,8 +5267,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4363,8 +5278,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4372,8 +5289,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4381,8 +5300,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4390,8 +5311,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4399,8 +5322,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4408,8 +5333,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4417,8 +5344,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4426,8 +5355,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4435,8 +5366,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4444,8 +5377,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4453,8 +5388,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4462,8 +5399,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4471,8 +5410,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4480,8 +5421,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4489,8 +5432,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4498,8 +5443,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4507,8 +5454,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4516,8 +5465,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -4525,8 +5476,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x20",
@@ -4534,8 +5487,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -4543,8 +5498,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -4552,8 +5509,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -4561,8 +5520,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -4570,8 +5531,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4579,8 +5542,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4588,8 +5553,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4597,8 +5564,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4606,8 +5575,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4615,8 +5586,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4624,8 +5597,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -4633,8 +5608,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -4642,8 +5619,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -4651,8 +5630,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -4660,8 +5641,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4669,8 +5652,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4678,8 +5663,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4687,8 +5674,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -4696,8 +5685,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4705,8 +5696,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4714,8 +5707,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4723,8 +5718,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4732,8 +5729,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4741,8 +5740,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -4750,8 +5751,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4759,8 +5762,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4768,8 +5773,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -4777,8 +5784,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4786,8 +5795,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -4795,8 +5806,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4804,8 +5817,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -4813,8 +5828,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -4822,8 +5839,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -4831,8 +5850,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -4840,8 +5861,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4849,8 +5872,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -4858,8 +5883,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4867,8 +5894,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -4876,8 +5905,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -4885,8 +5916,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -4894,8 +5927,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -4903,8 +5938,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4912,8 +5949,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -4921,8 +5960,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4930,8 +5971,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -4939,8 +5982,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -4948,8 +5993,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -4957,8 +6004,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -4966,8 +6015,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -4975,8 +6026,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -4984,8 +6037,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -4993,8 +6048,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -5002,8 +6059,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -5011,8 +6070,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -5020,8 +6081,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -5029,8 +6092,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5038,8 +6103,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5047,8 +6114,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5056,8 +6125,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5065,8 +6136,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5074,8 +6147,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5083,8 +6158,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5092,8 +6169,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -5101,8 +6180,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -5110,8 +6191,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -5119,8 +6202,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -5128,8 +6213,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -5137,8 +6224,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -5146,8 +6235,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -5155,8 +6246,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; AD REQ Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x4",
@@ -5164,8 +6257,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; AD RSP VN0 Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x8",
@@ -5173,8 +6268,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; BL NCB Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x40",
@@ -5182,8 +6279,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; BL NCS Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x80",
@@ -5191,8 +6290,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; BL RSP Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x10",
@@ -5200,8 +6301,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; BL DRS Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x20",
@@ -5209,8 +6312,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; VN0 Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x2",
@@ -5218,8 +6323,10 @@
},
{
"BriefDescription": "UPI Ingress Credit Allocations; VNA Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x1",
@@ -5227,8 +6334,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; AD REQ VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x4",
@@ -5236,8 +6345,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; AD RSP VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x8",
@@ -5245,8 +6356,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCB VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x40",
@@ -5254,6 +6367,7 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCS VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCS",
"PerPkg": "1",
@@ -5263,8 +6377,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL RSP VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x10",
@@ -5272,8 +6388,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL DRS VN0 Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x20",
@@ -5281,8 +6399,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; AD VNA Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x1",
@@ -5290,8 +6410,10 @@
},
{
"BriefDescription": "UPI Ingress Credits In Use Cycles; BL VNA Credits",
+ "Counter": "0",
"EventCode": "0x3B",
"EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
"UMask": "0x2",
@@ -5299,8 +6421,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5308,8 +6432,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5317,8 +6443,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5326,8 +6454,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5335,8 +6465,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5344,8 +6476,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5353,8 +6487,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5362,8 +6498,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5371,8 +6509,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5380,8 +6520,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5389,8 +6531,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5398,8 +6542,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5407,8 +6553,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -5416,8 +6564,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -5425,8 +6575,10 @@
},
{
"BriefDescription": "WbPushMtoI; Pushed to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was received WbPushMtoI; Counts the number of times when the CHA was able to push WbPushMToI to LLC",
"UMask": "0x1",
@@ -5434,8 +6586,10 @@
},
{
"BriefDescription": "WbPushMtoI; Pushed to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when the CHA was received WbPushMtoI; Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
"UMask": "0x2",
@@ -5443,8 +6597,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC0_SMI2",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -5452,8 +6608,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC1_SMI3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -5461,8 +6619,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC2_SMI4",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -5470,8 +6630,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC3_SMI5",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -5479,8 +6641,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC0_SMI0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -5488,8 +6652,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC1_SMI1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -5497,8 +6663,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Any RspIFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response I to Fwd F/E",
"UMask": "0xe4",
@@ -5506,8 +6674,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response I to Fwd M",
"UMask": "0xf0",
@@ -5515,8 +6685,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Any RspSFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response S to Fwd F/E",
"UMask": "0xe2",
@@ -5524,8 +6696,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Any RspSFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response S to Fwd M",
"UMask": "0xe8",
@@ -5533,8 +6707,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Any RspHitFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response any to Hit F/S/E",
"UMask": "0xe1",
@@ -5542,8 +6718,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspIFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response I to Fwd F/E",
"UMask": "0x44",
@@ -5551,8 +6729,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response I to Fwd M",
"UMask": "0x50",
@@ -5560,8 +6740,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspSFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response S to Fwd F/E",
"UMask": "0x42",
@@ -5569,8 +6751,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspSFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response S to Fwd M",
"UMask": "0x48",
@@ -5578,8 +6762,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Core RspHitFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response any to Hit F/S/E",
"UMask": "0x41",
@@ -5587,8 +6773,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response I to Fwd F/E",
"UMask": "0x84",
@@ -5596,8 +6784,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response I to Fwd M",
"UMask": "0x90",
@@ -5605,8 +6795,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response S to Fwd F/E",
"UMask": "0x82",
@@ -5614,8 +6806,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response S to Fwd M",
"UMask": "0x88",
@@ -5623,8 +6817,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; Evict RspHitFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response any to Hit F/S/E",
"UMask": "0x81",
@@ -5632,8 +6828,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspIFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response I to Fwd F/E",
"UMask": "0x24",
@@ -5641,8 +6839,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response I to Fwd M",
"UMask": "0x30",
@@ -5650,8 +6850,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspSFwdFE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response S to Fwd F/E",
"UMask": "0x22",
@@ -5659,8 +6861,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspSFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response S to Fwd M",
"UMask": "0x28",
@@ -5668,8 +6872,10 @@
},
{
"BriefDescription": "Core Cross Snoop Responses; External RspHitFSE",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response any to Hit F/S/E",
"UMask": "0x21",
@@ -5677,6 +6883,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CLOCKTICKS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
@@ -5684,6 +6891,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_FAST_ASSERTED.HORZ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA5",
"EventName": "UNC_C_FAST_ASSERTED",
@@ -5693,15 +6901,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.ANY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
@@ -5711,24 +6922,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x31",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x91",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
@@ -5738,15 +6954,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.E_STATE",
@@ -5756,6 +6975,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.F_STATE",
@@ -5765,15 +6985,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2f",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
@@ -5783,15 +7006,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.S_STATE",
@@ -5801,59 +7027,72 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SRC_THRTL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA4",
"EventName": "UNC_C_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.EVICT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IPQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x18",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IRQ",
@@ -5863,6 +7102,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
@@ -5872,6 +7112,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
@@ -5881,51 +7122,62 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x31",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x34",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.PRQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
@@ -5935,6 +7187,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
@@ -5944,6 +7197,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.REM_ALL",
@@ -5953,87 +7207,106 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.RRQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.RRQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x60",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WBQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_C_TOR_INSERTS.WBQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IPQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x18",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IPQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IRQ",
@@ -6043,6 +7316,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IRQ_HIT",
@@ -6052,6 +7326,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.IRQ_MISS",
@@ -6061,608 +7336,743 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x37",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x31",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x34",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.PRQ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.PRQ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x80",
"EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x82",
"EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x88",
"EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8A",
"EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x86",
"EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8E",
"EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x8C",
"EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x57",
"EventName": "UNC_H_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x57",
"EventName": "UNC_H_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x57",
"EventName": "UNC_H_BYPASS_CHA_IMC.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CMS_CLOCKTICKS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_H_CLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C1_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.C1_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.C1_TRANSITION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C6_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.C6_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.C6_TRANSITION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.GV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x17",
"EventName": "UNC_H_CORE_PMA.GV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.ANY_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_ONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.ANY_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.ANY_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.CORE_GTONE",
@@ -6672,24 +8082,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_ONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.CORE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x41",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.CORE_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x44",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EVICT_GTONE",
@@ -6699,59 +8114,72 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EVICT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x81",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EVICT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x84",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EXT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x22",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_ONE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EXT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x33",
"EventName": "UNC_H_CORE_SNP.EXT_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_COUNTER0_OCCUPANCY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x1F",
"EventName": "UNC_H_COUNTER0_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x53",
"EventName": "UNC_H_DIR_LOOKUP.NO_SNP",
@@ -6761,6 +8189,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x53",
"EventName": "UNC_H_DIR_LOOKUP.SNP",
@@ -6770,6 +8199,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x54",
"EventName": "UNC_H_DIR_UPDATE.HA",
@@ -6779,6 +8209,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.TOR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x54",
"EventName": "UNC_H_DIR_UPDATE.TOR",
@@ -6788,24 +8219,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAE",
"EventName": "UNC_H_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAE",
"EventName": "UNC_H_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.EX_RDS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5F",
"EventName": "UNC_H_HITME_HIT.EX_RDS",
@@ -6815,411 +8251,502 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5F",
"EventName": "UNC_H_HITME_HIT.SHARED_OWNREQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.WBMTOE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5F",
"EventName": "UNC_H_HITME_HIT.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5F",
"EventName": "UNC_H_HITME_HIT.WBMTOI_OR_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_LOOKUP.READ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5E",
"EventName": "UNC_H_HITME_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_LOOKUP.WRITE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5E",
"EventName": "UNC_H_HITME_LOOKUP.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x60",
"EventName": "UNC_H_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x60",
"EventName": "UNC_H_HITME_MISS.READ_OR_INV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x60",
"EventName": "UNC_H_HITME_MISS.SHARED_RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.DEALLOCATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.RDINVOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.RSPFWDI_REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.SHARED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x61",
"EventName": "UNC_H_HITME_UPDATE.SHARED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA7",
"EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA9",
"EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAB",
"EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAD",
"EventName": "UNC_H_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAD",
"EventName": "UNC_H_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x59",
"EventName": "UNC_H_IMC_READS_COUNT.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x59",
"EventName": "UNC_H_IMC_READS_COUNT.PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.FULL_MIG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5B",
"EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.INVITOM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x62",
"EventName": "UNC_H_IODC_ALLOC.INVITOM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.IODCFULL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x62",
"EventName": "UNC_H_IODC_ALLOC.IODCFULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.OSBGATED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x62",
"EventName": "UNC_H_IODC_ALLOC.OSBGATED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.SNPOUT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.SNPOUT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "UNC_H_IODC_DEALLOC.WBPUSHMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.CV0_PREF_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.CV0_PREF_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.CV0_PREF_VIC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.CV0_PREF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RFO_HIT_S",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.RFO_HIT_S",
@@ -7229,86 +8756,105 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.RSPI_WAS_FSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.WC_ALIASING",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x39",
"EventName": "UNC_H_MISC.WC_ALIASING",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_OSB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x55",
"EventName": "UNC_H_OSB",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.EDC0_SMI2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.EDC1_SMI3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.EDC2_SMI4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.EDC3_SMI5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.MC0_SMI0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x58",
"EventName": "UNC_H_READ_NO_CREDITS.MC1_SMI1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
@@ -7318,6 +8864,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
@@ -7327,6 +8874,7 @@
},
{
"BriefDescription": "read requests from home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.READS",
@@ -7336,6 +8884,7 @@
},
{
"BriefDescription": "read requests from local home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
@@ -7345,15 +8894,18 @@
},
{
"BriefDescription": "read requests from remote home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "write requests from home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.WRITES",
@@ -7363,6 +8915,7 @@
},
{
"BriefDescription": "write requests from local home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
@@ -7372,177 +8925,216 @@
},
{
"BriefDescription": "write requests from remote home agent",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x50",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA1",
"EventName": "UNC_H_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.AD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.AK",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.BL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA0",
"EventName": "UNC_H_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA3",
"EventName": "UNC_H_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA2",
"EventName": "UNC_H_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IPQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.IRQ",
@@ -7552,276 +9144,337 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.IRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.PRQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.PRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.RRQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.WBQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x13",
"EventName": "UNC_H_RxC_INSERTS.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x22",
"EventName": "UNC_H_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.ANY_IPQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x23",
"EventName": "UNC_H_RxC_IPQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x18",
"EventName": "UNC_H_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.ANY_REJECT_IRQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.PA_MATCH",
@@ -7831,177 +9484,216 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x19",
"EventName": "UNC_H_RxC_IRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x24",
"EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2C",
"EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x25",
"EventName": "UNC_H_RxC_ISMQ1_REJECT.ANY_ISMQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x25",
"EventName": "UNC_H_RxC_ISMQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2D",
"EventName": "UNC_H_RxC_ISMQ1_RETRY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2D",
"EventName": "UNC_H_RxC_ISMQ1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x11",
"EventName": "UNC_H_RxC_OCCUPANCY.IPQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x11",
"EventName": "UNC_H_RxC_OCCUPANCY.IRQ",
@@ -8011,1005 +9703,1228 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x11",
"EventName": "UNC_H_RxC_OCCUPANCY.RRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x11",
"EventName": "UNC_H_RxC_OCCUPANCY.WBQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2E",
"EventName": "UNC_H_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2F",
"EventName": "UNC_H_RxC_OTHER1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x20",
"EventName": "UNC_H_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.ANY_PRQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x21",
"EventName": "UNC_H_RxC_PRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2A",
"EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2B",
"EventName": "UNC_H_RxC_REQ_Q1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x26",
"EventName": "UNC_H_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.ANY_RRQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x27",
"EventName": "UNC_H_RxC_RRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x28",
"EventName": "UNC_H_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.ANY_WBQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x29",
"EventName": "UNC_H_RxC_WBQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB4",
"EventName": "UNC_H_RxR_BUSY_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB4",
"EventName": "UNC_H_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB4",
"EventName": "UNC_H_RxR_BUSY_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB4",
"EventName": "UNC_H_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB2",
"EventName": "UNC_H_RxR_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.IFV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB3",
"EventName": "UNC_H_RxR_CRD_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB1",
"EventName": "UNC_H_RxR_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB0",
"EventName": "UNC_H_RxR_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.E_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3D",
"EventName": "UNC_H_SF_EVICTION.E_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.M_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3D",
"EventName": "UNC_H_SF_EVICTION.M_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.S_STATE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3D",
"EventName": "UNC_H_SF_EVICTION.S_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.ALL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.BCST_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.BCST_REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.DIRECT_LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.DIRECT_REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x51",
"EventName": "UNC_H_SNOOPS_SENT.REMOTE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
@@ -9019,24 +10934,29 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
@@ -9046,15 +10966,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
@@ -9064,6 +10987,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
@@ -9073,1575 +10997,1925 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5C",
"EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_FWD_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5D",
"EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD0",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD2",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD4",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xD6",
"EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9D",
"EventName": "UNC_H_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9F",
"EventName": "UNC_H_TxR_HORZ_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x96",
"EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x97",
"EventName": "UNC_H_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x95",
"EventName": "UNC_H_TxR_HORZ_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x99",
"EventName": "UNC_H_TxR_HORZ_NACK.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x94",
"EventName": "UNC_H_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9B",
"EventName": "UNC_H_TxR_HORZ_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9B",
"EventName": "UNC_H_TxR_HORZ_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9B",
"EventName": "UNC_H_TxR_HORZ_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9B",
"EventName": "UNC_H_TxR_HORZ_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9C",
"EventName": "UNC_H_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9E",
"EventName": "UNC_H_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x92",
"EventName": "UNC_H_TxR_VERT_CYCLES_FULL.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x93",
"EventName": "UNC_H_TxR_VERT_CYCLES_NE.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x91",
"EventName": "UNC_H_TxR_VERT_INSERTS.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x98",
"EventName": "UNC_H_TxR_VERT_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x90",
"EventName": "UNC_H_TxR_VERT_OCCUPANCY.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.IV",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x9A",
"EventName": "UNC_H_TxR_VERT_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA6",
"EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xA8",
"EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAA",
"EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAC",
"EventName": "UNC_H_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xAC",
"EventName": "UNC_H_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x56",
"EventName": "UNC_H_WB_PUSH_MTOI.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x56",
"EventName": "UNC_H_WB_PUSH_MTOI.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.EDC0_SMI2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.EDC1_SMI3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.EDC2_SMI4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.EDC3_SMI5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.MC0_SMI0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5A",
"EventName": "UNC_H_WRITE_NO_CREDITS.MC1_SMI1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe4",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf0",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe2",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe8",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.ANY_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe1",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x44",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x50",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x42",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x48",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.CORE_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x41",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x84",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x82",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x88",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EVICT_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x81",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSPI_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSPI_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x30",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSPS_FWDFE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x22",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSPS_FWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x28",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x32",
"EventName": "UNC_H_XSNP_RESP.EXT_RSP_HITFSE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json
index f32d4d9d283a..216a00237cd1 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
"UMask": "0x1",
@@ -10,8 +12,10 @@
},
{
"BriefDescription": "Total Write Cache Occupancy; Snoops",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
"UMask": "0x2",
@@ -19,6 +23,7 @@
},
{
"BriefDescription": "Total IRP occupancy of inbound read and write requests.",
+ "Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
"PerPkg": "1",
@@ -28,15 +33,19 @@
},
{
"BriefDescription": "IRP Clocks",
+ "Counter": "0,1",
"EventCode": "0x1",
"EventName": "UNC_I_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x80",
@@ -44,8 +53,10 @@
},
{
"BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x2",
@@ -53,8 +64,10 @@
},
{
"BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x4",
@@ -62,8 +75,10 @@
},
{
"BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x20",
@@ -71,8 +86,10 @@
},
{
"BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x1",
@@ -80,6 +97,7 @@
},
{
"BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.PCITOM",
"PerPkg": "1",
@@ -89,6 +107,7 @@
},
{
"BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.RFO",
"PerPkg": "1",
@@ -98,8 +117,10 @@
},
{
"BriefDescription": "Coherent Ops; WbMtoI",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x40",
@@ -107,13 +128,16 @@
},
{
"BriefDescription": "FAF RF full",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_FAF_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_FAF_INSERTS",
"PerPkg": "1",
@@ -122,6 +146,7 @@
},
{
"BriefDescription": "Occupancy of the IRP FAF queue.",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_FAF_OCCUPANCY",
"PerPkg": "1",
@@ -130,95 +155,119 @@
},
{
"BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_FAF_TRANSACTIONS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.FAST_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.FAST_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.FAST_XFER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_MISC0.UNKNOWN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Lost Forward",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.LOST_FWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop pulled away ownership before a write was committed",
"UMask": "0x10",
@@ -226,8 +275,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x20",
@@ -235,8 +286,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x40",
@@ -244,8 +297,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SLOW_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x4",
@@ -253,8 +308,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SLOW_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x1",
@@ -262,8 +319,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SLOW_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x8",
@@ -271,8 +330,10 @@
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_MISC1.SLOW_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x2",
@@ -280,88 +341,110 @@
},
{
"BriefDescription": "P2P Requests",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_P2P_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "P2P requests from the ITC",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Occupancy",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_P2P_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "P2P B & S Queue Occupancy",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; P2P completions",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; match if local only",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; match if local and target matches",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; P2P Message",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; P2P reads",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; Match if remote only",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; match if remote and target matches",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions; P2P Writes",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
"UMask": "0x7e",
@@ -369,8 +452,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
"UMask": "0x74",
@@ -378,8 +463,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
"UMask": "0x72",
@@ -387,8 +474,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
"UMask": "0x78",
@@ -396,8 +485,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
"UMask": "0x71",
@@ -405,64 +496,80 @@
},
{
"BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
"UMask": "0x10",
@@ -470,8 +577,10 @@
},
{
"BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
"UMask": "0x20",
@@ -479,8 +588,10 @@
},
{
"BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
"UMask": "0x4",
@@ -488,8 +599,10 @@
},
{
"BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.READS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
"UMask": "0x1",
@@ -497,8 +610,10 @@
},
{
"BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
"UMask": "0x2",
@@ -506,6 +621,7 @@
},
{
"BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -515,118 +631,150 @@
},
{
"BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
"EventCode": "0xB",
"EventName": "UNC_I_TxC_AK_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x8",
"EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0xA",
"EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x1A",
"EventName": "UNC_I_TxR2_AD_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x1B",
"EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xD",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0xE",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0xC",
"EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
"Unit": "IRP"
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -634,8 +782,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -643,8 +793,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -652,8 +804,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -661,8 +815,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -670,8 +826,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -679,8 +837,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -688,8 +848,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -697,8 +859,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -706,8 +870,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -715,8 +881,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -724,8 +892,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -733,8 +903,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -742,8 +914,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -751,8 +925,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -760,8 +936,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -769,8 +947,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -778,8 +958,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -787,8 +969,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -796,8 +980,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -805,8 +991,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -814,8 +1002,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -823,8 +1013,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -832,8 +1024,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -841,8 +1035,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -850,8 +1046,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -859,8 +1057,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -868,8 +1068,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -877,8 +1079,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -886,8 +1090,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -895,8 +1101,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -904,8 +1112,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -913,8 +1123,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -922,8 +1134,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -931,8 +1145,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -940,8 +1156,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -949,8 +1167,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -958,8 +1178,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -967,8 +1189,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -976,8 +1200,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -985,8 +1211,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -994,8 +1222,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -1003,8 +1233,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1012,8 +1244,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1021,8 +1255,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1030,8 +1266,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -1039,8 +1277,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -1048,8 +1288,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -1057,6 +1299,7 @@
},
{
"BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
"PerPkg": "1",
@@ -1066,43 +1309,54 @@
},
{
"BriefDescription": "M2M to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M2M_BYPASS_M2M_Egress.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles - at UCLK",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M2M_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
"PerPkg": "1",
@@ -1111,6 +1365,7 @@
},
{
"BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
"PerPkg": "1",
@@ -1119,6 +1374,7 @@
},
{
"BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
"PerPkg": "1",
@@ -1127,6 +1383,7 @@
},
{
"BriefDescription": "Number of reads in which direct to Intel(R) UPI transactions were overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
"PerPkg": "1",
@@ -1135,6 +1392,7 @@
},
{
"BriefDescription": "Cycles when direct to Intel(R) UPI was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
"PerPkg": "1",
@@ -1143,6 +1401,7 @@
},
{
"BriefDescription": "Messages sent direct to the Intel(R) UPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
"PerPkg": "1",
@@ -1151,6 +1410,7 @@
},
{
"BriefDescription": "Number of reads that a message sent direct2 Intel(R) UPI was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
"PerPkg": "1",
@@ -1159,70 +1419,87 @@
},
{
"BriefDescription": "Directory Hit; On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Hit; On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
"PerPkg": "1",
@@ -1232,6 +1509,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
"PerPkg": "1",
@@ -1241,6 +1519,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
"PerPkg": "1",
@@ -1250,6 +1529,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
"PerPkg": "1",
@@ -1259,70 +1539,87 @@
},
{
"BriefDescription": "Directory Miss; On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On Dirty Line in A State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On Dirty Line in I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On Dirty Line in L State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Directory Miss; On Dirty Line in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
"PerPkg": "1",
@@ -1332,6 +1629,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
"PerPkg": "1",
@@ -1341,6 +1639,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
"PerPkg": "1",
@@ -1350,6 +1649,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
"PerPkg": "1",
@@ -1359,6 +1659,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
"PerPkg": "1",
@@ -1368,6 +1669,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
"PerPkg": "1",
@@ -1377,6 +1679,7 @@
},
{
"BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
"PerPkg": "1",
@@ -1386,8 +1689,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -1395,8 +1700,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -1404,8 +1711,10 @@
},
{
"BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_FAST_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x2",
@@ -1413,8 +1722,10 @@
},
{
"BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_FAST_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x1",
@@ -1422,8 +1733,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1431,8 +1744,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1440,8 +1755,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1449,8 +1766,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1458,8 +1777,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1467,8 +1788,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1476,8 +1799,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1485,8 +1810,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA9",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1494,8 +1821,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1503,8 +1832,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1512,8 +1843,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1521,8 +1854,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1530,8 +1865,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -1539,8 +1876,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -1548,6 +1887,7 @@
},
{
"BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.ALL",
"PerPkg": "1",
@@ -1557,22 +1897,27 @@
},
{
"BriefDescription": "M2M Reads Issued to iMC; All, regardless of priority.",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.FROM_TRANSGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC; Critical Priority",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.NORMAL",
"PerPkg": "1",
@@ -1582,6 +1927,7 @@
},
{
"BriefDescription": "Writes to iMC issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.ALL",
"PerPkg": "1",
@@ -1591,30 +1937,37 @@
},
{
"BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FROM_TRANSGRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC; ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.NI",
"PerPkg": "1",
@@ -1623,6 +1976,7 @@
},
{
"BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
"PerPkg": "1",
@@ -1632,44 +1986,55 @@
},
{
"BriefDescription": "M2M Writes Issued to iMC; ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Number Packet Header Matches; MC Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M2M_PKT_MATCH.MC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Number Packet Header Matches; Mesh Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x53",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch requests that got turn into a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
"PerPkg": "1",
@@ -1678,6 +2043,7 @@
},
{
"BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_M2M_PREFCAM_INSERTS",
"PerPkg": "1",
@@ -1686,15 +2052,19 @@
},
{
"BriefDescription": "Prefetch CAM Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -1702,8 +2072,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -1711,8 +2083,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -1720,8 +2094,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -1729,8 +2105,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -1738,8 +2116,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -1747,8 +2127,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -1756,8 +2138,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -1765,174 +2149,217 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M2M_RxC_AD_INSERTS",
"PerPkg": "1",
@@ -1941,6 +2368,7 @@
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
"PerPkg": "1",
@@ -1948,20 +2376,25 @@
},
{
"BriefDescription": "BL Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M2M_RxC_BL_INSERTS",
"PerPkg": "1",
@@ -1969,6 +2402,7 @@
},
{
"BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
"PerPkg": "1",
@@ -1976,8 +2410,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -1985,8 +2421,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -1994,8 +2432,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -2003,8 +2443,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -2012,8 +2454,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -2021,8 +2465,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -2030,8 +2476,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -2039,8 +2487,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -2048,8 +2498,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -2057,8 +2509,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_RxR_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -2066,8 +2520,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -2075,8 +2531,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -2084,8 +2542,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -2093,8 +2553,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -2102,8 +2564,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -2111,8 +2575,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -2120,8 +2586,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -2129,8 +2597,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -2138,8 +2608,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -2147,8 +2619,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -2156,8 +2630,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -2165,8 +2641,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -2174,8 +2652,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_RxR_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -2183,8 +2663,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -2192,8 +2674,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -2201,8 +2685,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -2210,8 +2696,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -2219,8 +2707,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -2228,8 +2718,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -2237,8 +2729,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -2246,8 +2740,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -2255,8 +2751,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -2264,8 +2762,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -2273,8 +2773,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -2282,8 +2784,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -2291,8 +2795,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -2300,8 +2806,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -2309,8 +2817,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -2318,8 +2828,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -2327,8 +2839,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -2336,8 +2850,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -2345,8 +2861,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -2354,8 +2872,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -2363,8 +2883,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -2372,8 +2894,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -2381,8 +2905,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -2390,8 +2916,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -2399,8 +2927,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -2408,8 +2938,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -2417,8 +2949,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -2426,8 +2960,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -2435,8 +2971,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -2444,8 +2982,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -2453,151 +2993,190 @@
},
{
"BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Pending Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "UNC_M2M_TRACKER_PENDING_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M2M_TxC_AD_INSERTS",
"PerPkg": "1",
@@ -2605,20 +3184,25 @@
},
{
"BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
"PerPkg": "1",
@@ -2626,430 +3210,537 @@
},
{
"BriefDescription": "Outbound Ring Transactions on AK; CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound Ring Transactions on AK; NDR Transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_M2M_TxC_AK.NDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x88",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Prefetch Read Cam Hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Read Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Write Compare Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy; Write Credit Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Sideband",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_TxC_AK_SIDEBAND.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Sideband",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_TxC_AK_SIDEBAND.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
"PerPkg": "1",
@@ -3058,54 +3749,67 @@
},
{
"BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -3114,24 +3818,30 @@
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -3139,8 +3849,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -3148,8 +3860,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -3157,8 +3871,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -3166,8 +3882,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -3175,8 +3893,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -3184,8 +3904,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -3193,8 +3915,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -3202,8 +3926,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -3211,8 +3937,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -3220,8 +3948,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9F",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -3229,8 +3959,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -3238,8 +3970,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -3247,8 +3981,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -3256,8 +3992,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -3265,8 +4003,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -3274,8 +4014,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -3283,8 +4025,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -3292,8 +4036,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -3301,8 +4047,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -3310,8 +4058,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -3319,8 +4069,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -3328,8 +4080,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -3337,8 +4091,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -3346,8 +4102,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -3355,8 +4113,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -3364,8 +4124,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -3373,8 +4135,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -3382,8 +4146,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -3391,8 +4157,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -3400,8 +4168,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x20",
@@ -3409,8 +4179,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -3418,8 +4190,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -3427,8 +4201,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -3436,8 +4212,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_HORZ_NACK.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -3445,8 +4223,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -3454,8 +4234,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -3463,8 +4245,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -3472,8 +4256,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -3481,8 +4267,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -3490,8 +4278,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -3499,8 +4289,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -3508,8 +4300,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -3517,8 +4311,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -3526,8 +4322,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -3535,8 +4333,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -3544,8 +4344,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -3553,8 +4355,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -3562,8 +4366,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -3571,8 +4377,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -3580,8 +4388,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -3589,8 +4399,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -3598,8 +4410,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -3607,8 +4421,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -3616,8 +4432,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -3625,8 +4443,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -3634,8 +4454,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -3643,8 +4465,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -3652,8 +4476,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -3661,8 +4487,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -3670,8 +4498,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -3679,8 +4509,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -3688,8 +4520,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -3697,8 +4531,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -3706,8 +4542,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -3715,8 +4553,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -3724,8 +4564,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -3733,8 +4575,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -3742,8 +4586,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -3751,8 +4597,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -3760,8 +4608,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -3769,8 +4619,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -3778,8 +4630,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -3787,8 +4641,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -3796,8 +4652,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -3805,8 +4663,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -3814,8 +4674,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -3823,8 +4685,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -3832,8 +4696,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -3841,8 +4707,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -3850,8 +4718,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -3859,8 +4729,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -3868,8 +4740,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -3877,8 +4751,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -3886,8 +4762,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -3895,8 +4773,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -3904,8 +4784,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -3913,8 +4795,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -3922,8 +4806,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -3931,8 +4817,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -3940,8 +4828,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -3949,8 +4839,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -3958,8 +4850,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -3967,8 +4861,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -3976,8 +4872,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -3985,8 +4883,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -3994,8 +4894,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -4003,8 +4905,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -4012,8 +4916,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -4021,8 +4927,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -4030,8 +4938,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4039,8 +4949,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4048,8 +4960,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4057,8 +4971,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4066,8 +4982,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4075,8 +4993,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4084,8 +5004,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4093,8 +5015,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xA8",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4102,8 +5026,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4111,8 +5037,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4120,8 +5048,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4129,8 +5059,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4138,8 +5070,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -4147,8 +5081,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -4156,179 +5092,223 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x61",
"EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy; Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy; Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy; Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4336,8 +5316,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4345,8 +5327,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4354,8 +5338,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4363,8 +5349,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4372,8 +5360,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x80",
"EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4381,8 +5371,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4390,8 +5382,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4399,8 +5393,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4408,8 +5404,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4417,8 +5415,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4426,8 +5426,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x82",
"EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4435,8 +5437,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4444,8 +5448,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4453,8 +5459,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4462,8 +5470,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4471,8 +5481,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4480,8 +5492,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x88",
"EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4489,8 +5503,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4498,8 +5514,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4507,8 +5525,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4516,8 +5536,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4525,8 +5547,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4534,8 +5558,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x8A",
"EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4543,8 +5569,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4552,8 +5580,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4561,8 +5591,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4570,8 +5602,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4579,8 +5613,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4588,8 +5624,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x84",
"EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4597,8 +5635,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4606,8 +5646,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4615,8 +5657,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4624,8 +5668,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4633,8 +5679,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4642,8 +5690,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x86",
"EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4651,8 +5701,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4660,8 +5712,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4669,8 +5723,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4678,8 +5734,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4687,8 +5745,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4696,8 +5756,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0",
"EventCode": "0x8E",
"EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4705,8 +5767,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4714,8 +5778,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4723,8 +5789,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4732,8 +5800,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4741,8 +5811,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4750,8 +5822,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0x8C",
"EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4759,8 +5833,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty; Requests",
+ "Counter": "0,1,2",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x4",
@@ -4768,8 +5844,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty; Snoops",
+ "Counter": "0,1,2",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x8",
@@ -4777,8 +5855,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty; VNA Messages",
+ "Counter": "0,1,2",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x1",
@@ -4786,8 +5866,10 @@
},
{
"BriefDescription": "CBox AD Credits Empty; Writebacks",
+ "Counter": "0,1,2",
"EventCode": "0x22",
"EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
"UMask": "0x2",
@@ -4795,39 +5877,49 @@
},
{
"BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
"EventCode": "0x1",
"EventName": "UNC_M3UPI_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of uclks in the M3 uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
"Unit": "M3UPI"
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2",
"EventCode": "0xC0",
"EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2C Sent",
+ "Counter": "0,1,2",
"EventCode": "0x2B",
"EventName": "UNC_M3UPI_D2C_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases BL sends direct to core",
"Unit": "M3UPI"
},
{
"BriefDescription": "D2U Sent",
+ "Counter": "0,1,2",
"EventCode": "0x2A",
"EventName": "UNC_M3UPI_D2U_SENT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cases where SMI3 sends D2U command",
"Unit": "M3UPI"
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2",
"EventCode": "0xAE",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -4835,8 +5927,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2",
"EventCode": "0xAE",
"EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -4844,8 +5938,10 @@
},
{
"BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_FAST_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x2",
@@ -4853,8 +5949,10 @@
},
{
"BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2",
"EventCode": "0xA5",
"EventName": "UNC_M3UPI_FAST_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"UMask": "0x1",
@@ -4862,8 +5960,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4871,8 +5971,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4880,8 +5982,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4889,8 +5993,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA7",
"EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4898,8 +6004,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA9",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4907,8 +6015,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA9",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4916,8 +6026,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA9",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4925,8 +6037,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA9",
"EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4934,8 +6048,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -4943,8 +6059,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -4952,8 +6070,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -4961,8 +6081,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xAB",
"EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -4970,8 +6092,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -4979,8 +6103,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2",
"EventCode": "0xAD",
"EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -4988,8 +6114,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO0_IIO1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x1",
@@ -4997,8 +6125,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO2",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x2",
@@ -5006,8 +6136,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO3",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x4",
@@ -5015,8 +6147,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO4",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x8",
@@ -5024,8 +6158,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; IIO5",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x10",
@@ -5033,8 +6169,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; All IIO targets for NCS are in single mask. ORs them together",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x20",
@@ -5042,8 +6180,10 @@
},
{
"BriefDescription": "M2 BL Credits Empty; Selected M2p BL NCS credits",
+ "Counter": "0,1,2",
"EventCode": "0x23",
"EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No vn0 and vna credits available to send to M2",
"UMask": "0x40",
@@ -5051,8 +6191,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AD - Slot 0",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x1",
@@ -5060,8 +6202,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AD - Slot 1",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x2",
@@ -5069,8 +6213,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AD - Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x4",
@@ -5078,8 +6224,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AK - Slot 0",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x10",
@@ -5087,8 +6235,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; AK - Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x20",
@@ -5096,8 +6246,10 @@
},
{
"BriefDescription": "Multi Slot Flit Received; BL - Slot 0",
+ "Counter": "0,1,2",
"EventCode": "0x3E",
"EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
"UMask": "0x8",
@@ -5105,8 +6257,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -5114,8 +6268,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -5123,8 +6279,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -5132,8 +6290,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2",
"EventCode": "0xA1",
"EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -5141,8 +6301,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -5150,8 +6312,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -5159,8 +6323,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -5168,8 +6334,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2",
"EventCode": "0xA0",
"EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -5177,87 +6345,109 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2",
"EventCode": "0xA3",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "Counter": "0,1,2",
"EventCode": "0xA2",
"EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2",
"EventCode": "0xA4",
"EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Lost Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5265,8 +6455,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5274,8 +6466,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5283,8 +6477,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5292,8 +6488,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5301,8 +6499,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5310,8 +6510,10 @@
},
{
"BriefDescription": "Lost Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4B",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message requested but lost arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5319,8 +6521,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5328,8 +6532,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5337,8 +6543,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5346,8 +6554,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5355,8 +6565,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5364,8 +6576,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5373,8 +6587,10 @@
},
{
"BriefDescription": "Lost Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4C",
"EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message requested but lost arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5382,8 +6598,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; AD, BL Parallel Win",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD and BL messages won arbitration concurrently / in parallel",
"UMask": "0x40",
@@ -5391,8 +6609,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN0",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
"UMask": "0x4",
@@ -5400,8 +6620,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN1",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
"UMask": "0x8",
@@ -5409,8 +6631,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN0",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
"UMask": "0x10",
@@ -5418,8 +6642,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN1",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
"UMask": "0x20",
@@ -5427,8 +6653,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; Parallel Bias to VN0",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0/VN1 arbiter gave second, consecutive win to vn0, delaying vn1 win, because vn0 offered parallel ad/bl",
"UMask": "0x1",
@@ -5436,8 +6664,10 @@
},
{
"BriefDescription": "Arb Miscellaneous; Parallel Bias to VN1",
+ "Counter": "0,1,2",
"EventCode": "0x4D",
"EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0/VN1 arbiter gave second, consecutive win to vn1, delaying vn0 win, because vn1 offered parallel ad/bl",
"UMask": "0x2",
@@ -5445,8 +6675,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5454,8 +6686,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5463,8 +6697,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5472,8 +6708,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5481,8 +6719,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5490,8 +6730,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5499,8 +6741,10 @@
},
{
"BriefDescription": "Can't Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x49",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5508,8 +6752,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5517,8 +6763,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5526,8 +6774,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5535,8 +6785,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5544,8 +6796,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5553,8 +6807,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5562,8 +6818,10 @@
},
{
"BriefDescription": "Can't Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4A",
"EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5571,8 +6829,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5580,8 +6840,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5589,8 +6851,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5598,8 +6862,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5607,8 +6873,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5616,8 +6884,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5625,8 +6895,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x47",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5634,8 +6906,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5643,8 +6917,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5652,8 +6928,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5661,8 +6939,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5670,8 +6950,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5679,8 +6961,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5688,8 +6972,10 @@
},
{
"BriefDescription": "No Credits to Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x48",
"EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5697,8 +6983,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on BL Arb",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
"UMask": "0x2",
@@ -5706,8 +6994,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on Idle",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to slot 0 of independent flit while pipeline is idle",
"UMask": "0x1",
@@ -5715,8 +7005,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 1",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to flit slot 1 while merging with bl message in same flit",
"UMask": "0x4",
@@ -5724,8 +7016,10 @@
},
{
"BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 2",
+ "Counter": "0,1,2",
"EventCode": "0x40",
"EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to flit slot 2 while merging with bl message in same flit",
"UMask": "0x8",
@@ -5733,8 +7027,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5742,8 +7038,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5751,8 +7049,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5760,8 +7060,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5769,8 +7071,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5778,8 +7082,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5787,8 +7093,10 @@
},
{
"BriefDescription": "VN0 message lost contest for flit; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5796,8 +7104,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5805,8 +7115,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5814,8 +7126,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5823,8 +7137,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5832,8 +7148,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5841,8 +7159,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -5850,8 +7170,10 @@
},
{
"BriefDescription": "VN1 message lost contest for flit; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x51",
"EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -5859,8 +7181,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events; Any In BGF FIFO",
+ "Counter": "0,1,2",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Indication that at least one packet (flit) is in the bgf (fifo only)",
"UMask": "0x1",
@@ -5868,8 +7192,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events; Any in BGF Path",
+ "Counter": "0,1,2",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
"UMask": "0x2",
@@ -5877,8 +7203,10 @@
},
{
"BriefDescription": "Miscellaneous Credit Events; No D2K For Arb",
+ "Counter": "0,1,2",
"EventCode": "0x60",
"EventName": "UNC_M3UPI_RxC_CRD_MISC.NO_D2K_FOR_ARB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VN0 or VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
"UMask": "0x4",
@@ -5886,8 +7214,10 @@
},
{
"BriefDescription": "Credit Occupancy; D2K Credits",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x10",
@@ -5895,8 +7225,10 @@
},
{
"BriefDescription": "Credit Occupancy; Packets in BGF FIFO",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
"UMask": "0x2",
@@ -5904,8 +7236,10 @@
},
{
"BriefDescription": "Credit Occupancy; Packets in BGF Path",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
"UMask": "0x4",
@@ -5913,8 +7247,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "count of bl messages in pump-1-pending state, in completion fifo only",
"UMask": "0x40",
@@ -5922,8 +7258,10 @@
},
{
"BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "count of bl messages in pump-1-pending state, in marker table and in fifo",
"UMask": "0x20",
@@ -5931,8 +7269,10 @@
},
{
"BriefDescription": "Credit Occupancy; Transmit Credits",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
"UMask": "0x8",
@@ -5940,8 +7280,10 @@
},
{
"BriefDescription": "Credit Occupancy; VNA In Use",
+ "Counter": "0,1,2",
"EventCode": "0x61",
"EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
"UMask": "0x1",
@@ -5949,8 +7291,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -5958,8 +7302,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -5967,8 +7313,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -5976,8 +7324,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -5985,8 +7335,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -5994,8 +7346,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6003,8 +7357,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x43",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6012,8 +7368,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6021,8 +7379,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6030,8 +7390,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6039,8 +7401,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6048,8 +7412,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6057,8 +7423,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6066,8 +7434,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x44",
"EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6075,8 +7445,10 @@
},
{
"BriefDescription": "Data Flit Not Sent; All",
+ "Counter": "0,1,2",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data flit is ready for transmission but could not be sent",
"UMask": "0x1",
@@ -6084,8 +7456,10 @@
},
{
"BriefDescription": "Data Flit Not Sent; No BGF Credits",
+ "Counter": "0,1,2",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_BGF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data flit is ready for transmission but could not be sent",
"UMask": "0x2",
@@ -6093,8 +7467,10 @@
},
{
"BriefDescription": "Data Flit Not Sent; No TxQ Credits",
+ "Counter": "0,1,2",
"EventCode": "0x57",
"EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Data flit is ready for transmission but could not be sent",
"UMask": "0x4",
@@ -6102,8 +7478,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 0",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "generating bl data flit sequence; waiting for data pump 0",
"UMask": "0x1",
@@ -6111,8 +7489,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
"UMask": "0x10",
@@ -6120,8 +7500,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "pump-1-pending logic is tracking at least one message",
"UMask": "0x8",
@@ -6129,8 +7511,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "pump-1-pending completion fifo is full",
"UMask": "0x40",
@@ -6138,8 +7522,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
"UMask": "0x20",
@@ -6147,8 +7533,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "a bl message finished but is in limbo and moved to pump-1-pending logic",
"UMask": "0x4",
@@ -6156,8 +7544,10 @@
},
{
"BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 1",
+ "Counter": "0,1,2",
"EventCode": "0x59",
"EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "generating bl data flit sequence; waiting for data pump 1",
"UMask": "0x2",
@@ -6165,15 +7555,19 @@
},
{
"BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC",
+ "Counter": "0,1,2",
"EventCode": "0x5A",
"EventName": "UNC_M3UPI_RxC_FLITS_MISC",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit; One Message",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "One message in flit; VNA or non-VNA flit",
"UMask": "0x1",
@@ -6181,8 +7575,10 @@
},
{
"BriefDescription": "Sent Header Flit; One Message in non-VNA",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG_VNX",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "One message in flit; non-VNA flit",
"UMask": "0x8",
@@ -6190,8 +7586,10 @@
},
{
"BriefDescription": "Sent Header Flit; Two Messages",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.2_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Two messages in flit; VNA flit",
"UMask": "0x2",
@@ -6199,8 +7597,10 @@
},
{
"BriefDescription": "Sent Header Flit; Three Messages",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.3_MSGS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Three messages in flit; VNA flit",
"UMask": "0x4",
@@ -6208,40 +7608,50 @@
},
{
"BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
"EventCode": "0x56",
"EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; All",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Needs Data Flit",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL message requires data flit sequence",
"UMask": "0x2",
@@ -6249,8 +7659,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 0",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Waiting for header pump 0",
"UMask": "0x4",
@@ -6258,8 +7670,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header pump 1 is not required for flit",
"UMask": "0x10",
@@ -6267,8 +7681,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Bubble",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header pump 1 is not required for flit but flit transmission delayed",
"UMask": "0x20",
@@ -6276,8 +7692,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Not Avail",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Header pump 1 is not required for flit and not available",
"UMask": "0x40",
@@ -6285,8 +7703,10 @@
},
{
"BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 1",
+ "Counter": "0,1,2",
"EventCode": "0x58",
"EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Waiting for header pump 1",
"UMask": "0x8",
@@ -6294,8 +7714,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Accumulate",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
"UMask": "0x1",
@@ -6303,8 +7725,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Accumulate Ready",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
"UMask": "0x2",
@@ -6312,8 +7736,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Accumulate Wasted",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
"UMask": "0x4",
@@ -6321,8 +7747,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Run-Ahead - Blocked",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
"UMask": "0x8",
@@ -6330,8 +7758,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Run-Ahead - Message",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting is in run-ahead to start new flit, and message is actually slotted into new flit",
"UMask": "0x10",
@@ -6339,8 +7769,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Parallel Ok",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; New header flit construction may proceed in parallel with data flit sequence",
"UMask": "0x20",
@@ -6348,8 +7780,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Parallel Flit Finished",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_FLIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit finished assembly in parallel with data flit sequence",
"UMask": "0x80",
@@ -6357,8 +7791,10 @@
},
{
"BriefDescription": "Flit Gen - Header 1; Parallel Message",
+ "Counter": "0,1,2",
"EventCode": "0x53",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 1; Message is slotted into header flit in parallel with data flit sequence",
"UMask": "0x40",
@@ -6366,8 +7802,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2; Rate-matching Stall",
+ "Counter": "0,1,2",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 2; Rate-matching stall injected",
"UMask": "0x1",
@@ -6375,8 +7813,10 @@
},
{
"BriefDescription": "Flit Gen - Header 2; Rate-matching Stall - No Message",
+ "Counter": "0,1,2",
"EventCode": "0x54",
"EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Events related to Header Flit Generation - Set 2; Rate matching stall injected, but no additional message slotted during stall cycle",
"UMask": "0x2",
@@ -6384,8 +7824,10 @@
},
{
"BriefDescription": "Header Not Sent; All",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent",
"UMask": "0x1",
@@ -6393,8 +7835,10 @@
},
{
"BriefDescription": "Header Not Sent; No BGF Credits",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; No BGF credits available",
"UMask": "0x2",
@@ -6402,8 +7846,10 @@
},
{
"BriefDescription": "Header Not Sent; No BGF Credits + No Extra Message Slotted",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; No BGF credits available; no additional message slotted into flit",
"UMask": "0x8",
@@ -6411,8 +7857,10 @@
},
{
"BriefDescription": "Header Not Sent; No TxQ Credits",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; No TxQ credits available",
"UMask": "0x4",
@@ -6420,8 +7868,10 @@
},
{
"BriefDescription": "Header Not Sent; No TxQ Credits + No Extra Message Slotted",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; No TxQ credits available; no additional message slotted into flit",
"UMask": "0x10",
@@ -6429,8 +7879,10 @@
},
{
"BriefDescription": "Header Not Sent; Sent - One Slot Taken",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ONE_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with only one slot taken (two slots free)",
"UMask": "0x20",
@@ -6438,8 +7890,10 @@
},
{
"BriefDescription": "Header Not Sent; Sent - Three Slots Taken",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.THREE_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with three slots taken (no slots free)",
"UMask": "0x80",
@@ -6447,8 +7901,10 @@
},
{
"BriefDescription": "Header Not Sent; Sent - Two Slots Taken",
+ "Counter": "0,1,2",
"EventCode": "0x55",
"EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.TWO_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with only two slots taken (one slots free)",
"UMask": "0x40",
@@ -6456,8 +7912,10 @@
},
{
"BriefDescription": "Message Held; Can't Slot AD",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x40",
@@ -6465,8 +7923,10 @@
},
{
"BriefDescription": "Message Held; Can't Slot BL",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
"UMask": "0x80",
@@ -6474,8 +7934,10 @@
},
{
"BriefDescription": "Message Held; Parallel AD Lost",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_AD_LOST",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "some AD message lost contest for slot 0 (logical OR of all AD events under INGR_SLOT_LOST_MC_VN{0,1})",
"UMask": "0x10",
@@ -6483,8 +7945,10 @@
},
{
"BriefDescription": "Message Held; Parallel Attempt",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ad and bl messages attempted to slot into the same flit in parallel",
"UMask": "0x4",
@@ -6492,8 +7956,10 @@
},
{
"BriefDescription": "Message Held; Parallel BL Lost",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_BL_LOST",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "some BL message lost contest for slot 0 (logical OR of all BL events under INGR_SLOT_LOST_MC_VN{0,1})",
"UMask": "0x20",
@@ -6501,8 +7967,10 @@
},
{
"BriefDescription": "Message Held; Parallel Success",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
@@ -6510,8 +7978,10 @@
},
{
"BriefDescription": "Message Held; VN0",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
"UMask": "0x1",
@@ -6519,8 +7989,10 @@
},
{
"BriefDescription": "Message Held; VN1",
+ "Counter": "0,1,2",
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
"UMask": "0x2",
@@ -6528,8 +8000,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6537,8 +8011,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6546,8 +8022,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6555,8 +8033,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6564,8 +8044,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6573,8 +8055,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6582,8 +8066,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x41",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6591,8 +8077,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6600,8 +8088,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6609,8 +8099,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6618,8 +8110,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6627,8 +8121,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6636,8 +8132,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6645,8 +8143,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x42",
"EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6654,8 +8154,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6663,8 +8165,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6672,8 +8176,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6681,8 +8187,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6690,8 +8198,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6699,8 +8209,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6708,8 +8220,10 @@
},
{
"BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x45",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6717,8 +8231,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6726,8 +8242,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6735,8 +8253,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6744,8 +8264,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6753,8 +8275,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6762,8 +8286,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6771,8 +8297,10 @@
},
{
"BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x46",
"EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6780,8 +8308,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6789,8 +8319,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6798,8 +8330,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6807,8 +8341,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6816,8 +8352,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6825,8 +8363,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6834,8 +8374,10 @@
},
{
"BriefDescription": "VN0 message can't slot into flit; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4E",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6843,8 +8385,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -6852,8 +8396,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -6861,8 +8407,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -6870,8 +8418,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -6879,8 +8429,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; NCS on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Standard (NCS) messages on BL.",
"UMask": "0x40",
@@ -6888,8 +8440,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -6897,8 +8451,10 @@
},
{
"BriefDescription": "VN1 message can't slot into flit; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x4F",
"EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -6906,32 +8462,40 @@
},
{
"BriefDescription": "SMI3 Prefetch Messages; Lost Arbitration",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARB_LOST",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "SMI3 Prefetch Messages; Arrived",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARRIVED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "SMI3 Prefetch Messages; Dropped - Old",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_OLD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "SMI3 Prefetch Messages; Dropped - Wrap",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_WRAP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Dropped because it was overwritten by new message while prefetch queue was full",
"UMask": "0x10",
@@ -6939,16 +8503,20 @@
},
{
"BriefDescription": "SMI3 Prefetch Messages; Slotted",
+ "Counter": "0,1,2",
"EventCode": "0x62",
"EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.SLOTTED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "Remote VNA Credits; Any In Use",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "At least one remote vna credit is in use",
"UMask": "0x20",
@@ -6956,8 +8524,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Corrected",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of remote vna credits corrected (local return) per cycle",
"UMask": "0x2",
@@ -6965,8 +8535,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Level < 1",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote vna credit level is less than 1 (i.e. no vna credits available)",
"UMask": "0x4",
@@ -6974,8 +8546,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Level < 4",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
"UMask": "0x8",
@@ -6983,8 +8557,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Level < 5",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
"UMask": "0x10",
@@ -6992,8 +8568,10 @@
},
{
"BriefDescription": "Remote VNA Credits; Used",
+ "Counter": "0,1,2",
"EventCode": "0x5B",
"EventName": "UNC_M3UPI_RxC_VNA_CRD.USED",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of remote vna credits consumed per cycle",
"UMask": "0x1",
@@ -7001,8 +8579,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -7010,8 +8590,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -7019,8 +8601,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -7028,8 +8612,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB4",
"EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -7037,8 +8623,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -7046,8 +8634,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -7055,8 +8645,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -7064,8 +8656,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -7073,8 +8667,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -7082,8 +8678,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB2",
"EventName": "UNC_M3UPI_RxR_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -7091,8 +8689,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -7100,8 +8700,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -7109,8 +8711,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -7118,8 +8722,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -7127,8 +8733,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -7136,8 +8744,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -7145,8 +8755,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB3",
"EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -7154,8 +8766,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -7163,8 +8777,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -7172,8 +8788,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -7181,8 +8799,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -7190,8 +8810,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -7199,8 +8821,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB1",
"EventName": "UNC_M3UPI_RxR_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -7208,8 +8832,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -7217,8 +8843,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -7226,8 +8854,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -7235,8 +8865,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -7244,8 +8876,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -7253,8 +8887,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0xB0",
"EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -7262,8 +8898,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7271,8 +8909,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7280,8 +8920,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7289,8 +8931,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7298,8 +8942,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7307,8 +8953,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0xD0",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7316,8 +8964,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7325,8 +8975,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7334,8 +8986,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7343,8 +8997,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7352,8 +9008,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7361,8 +9019,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0xD2",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7370,8 +9030,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7379,8 +9041,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7388,8 +9052,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7397,8 +9063,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7406,8 +9074,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7415,8 +9085,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0xD4",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7424,8 +9096,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7433,8 +9107,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7442,8 +9118,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7451,8 +9129,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7460,8 +9140,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7469,8 +9151,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
"EventCode": "0xD6",
"EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7478,8 +9162,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -7487,8 +9173,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -7496,8 +9184,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -7505,8 +9195,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -7514,8 +9206,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -7523,8 +9217,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -7532,8 +9228,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -7541,8 +9239,10 @@
},
{
"BriefDescription": "Failed ARB for AD; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x30",
"EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -7550,8 +9250,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x1",
@@ -7559,8 +9261,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x2",
@@ -7568,8 +9272,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x4",
@@ -7577,8 +9283,10 @@
},
{
"BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
"EventCode": "0x2C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
"UMask": "0x8",
@@ -7586,8 +9294,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x1",
@@ -7595,8 +9305,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x4",
@@ -7604,8 +9316,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x2",
@@ -7613,8 +9327,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x8",
@@ -7622,8 +9338,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x10",
@@ -7631,8 +9349,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x40",
@@ -7640,8 +9360,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x20",
@@ -7649,8 +9371,10 @@
},
{
"BriefDescription": "AD Flow Q Not Empty; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x27",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
"UMask": "0x80",
@@ -7658,8 +9382,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -7667,8 +9393,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -7676,8 +9404,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -7685,8 +9415,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -7694,8 +9426,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -7703,8 +9437,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -7712,8 +9448,10 @@
},
{
"BriefDescription": "AD Flow Q Inserts; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2D",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -7721,64 +9459,80 @@
},
{
"BriefDescription": "AD Flow Q Occupancy; VN0 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN0 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN1 REQ Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "AD Flow Q Occupancy; VN1 SNP Messages",
+ "Counter": "0",
"EventCode": "0x1C",
"EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Number of Snoop Targets; CHA on VN0",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_CHA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to CHA",
"UMask": "0x4",
@@ -7786,8 +9540,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN0",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_NON_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of non-idle cycles in issuing Vn0 Snpf",
"UMask": "0x40",
@@ -7795,8 +9551,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN0",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to peer UPI0",
"UMask": "0x1",
@@ -7804,8 +9562,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN0",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to peer UPI1",
"UMask": "0x2",
@@ -7813,8 +9573,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; CHA on VN1",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_CHA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to CHA",
"UMask": "0x20",
@@ -7822,8 +9584,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN1",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_NON_IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of non-idle cycles in issuing Vn1 Snpf",
"UMask": "0x80",
@@ -7831,8 +9595,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN1",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to peer UPI0",
"UMask": "0x8",
@@ -7840,8 +9606,10 @@
},
{
"BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN1",
+ "Counter": "0",
"EventCode": "0x3C",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to peer UPI1",
"UMask": "0x10",
@@ -7849,8 +9617,10 @@
},
{
"BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "Counter": "0,1,2",
"EventCode": "0x3D",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_NONSNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outcome of SnpF pending arbitration; FlowQ txn issued when SnpF pending on Vn0",
"UMask": "0x1",
@@ -7858,8 +9628,10 @@
},
{
"BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "Counter": "0,1,2",
"EventCode": "0x3D",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_VN2SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outcome of SnpF pending arbitration; FlowQ Vn0 SnpF issued when SnpF pending on Vn1",
"UMask": "0x4",
@@ -7867,8 +9639,10 @@
},
{
"BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "Counter": "0,1,2",
"EventCode": "0x3D",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_NONSNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outcome of SnpF pending arbitration; FlowQ txn issued when SnpF pending on Vn1",
"UMask": "0x2",
@@ -7876,8 +9650,10 @@
},
{
"BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "Counter": "0,1,2",
"EventCode": "0x3D",
"EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_VN0SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outcome of SnpF pending arbitration; FlowQ Vn1 SnpF issued when SnpF pending on Vn0",
"UMask": "0x8",
@@ -7885,8 +9661,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x1",
@@ -7894,8 +9672,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x2",
@@ -7903,8 +9683,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x8",
@@ -7912,8 +9694,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x10",
@@ -7921,8 +9705,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x20",
@@ -7930,8 +9716,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - Credit Available; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x34",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
"UMask": "0x80",
@@ -7939,8 +9727,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x1",
@@ -7948,8 +9738,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x2",
@@ -7957,8 +9749,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x8",
@@ -7966,8 +9760,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x10",
@@ -7975,8 +9771,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x20",
@@ -7984,8 +9782,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - New Message; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x33",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x80",
@@ -7993,8 +9793,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x1",
@@ -8002,8 +9804,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x4",
@@ -8011,8 +9815,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x2",
@@ -8020,8 +9826,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x8",
@@ -8029,8 +9837,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x10",
@@ -8038,8 +9848,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x40",
@@ -8047,8 +9859,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x20",
@@ -8056,8 +9870,10 @@
},
{
"BriefDescription": "Speculative ARB for AD - No Credit; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x32",
"EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x80",
@@ -8065,22 +9881,28 @@
},
{
"BriefDescription": "AK Flow Q Inserts",
+ "Counter": "0,1,2",
"EventCode": "0x2F",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "AK Flow Q Occupancy",
+ "Counter": "0",
"EventCode": "0x1E",
"EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
"BriefDescription": "Failed ARB for BL; VN0 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x4",
@@ -8088,8 +9910,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN0 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x8",
@@ -8097,8 +9921,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x1",
@@ -8106,8 +9932,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x2",
@@ -8115,8 +9943,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN1 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x40",
@@ -8124,8 +9954,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN1 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x80",
@@ -8133,8 +9965,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x10",
@@ -8142,8 +9976,10 @@
},
{
"BriefDescription": "Failed ARB for BL; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x35",
"EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL arb but no win; arb request asserted but not won",
"UMask": "0x20",
@@ -8151,8 +9987,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x1",
@@ -8160,8 +9998,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x4",
@@ -8169,8 +10009,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x2",
@@ -8178,8 +10020,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x8",
@@ -8187,8 +10031,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x10",
@@ -8196,8 +10042,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x40",
@@ -8205,8 +10053,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x20",
@@ -8214,8 +10064,10 @@
},
{
"BriefDescription": "BL Flow Q Not Empty; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x28",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
"UMask": "0x80",
@@ -8223,8 +10075,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x1",
@@ -8232,8 +10086,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x2",
@@ -8241,8 +10097,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN0 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x8",
@@ -8250,8 +10108,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN0 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x4",
@@ -8259,8 +10119,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x10",
@@ -8268,8 +10130,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x20",
@@ -8277,8 +10141,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN1_NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x80",
@@ -8286,8 +10152,10 @@
},
{
"BriefDescription": "BL Flow Q Inserts; VN1_NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x2E",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
"UMask": "0x40",
@@ -8295,72 +10163,90 @@
},
{
"BriefDescription": "BL Flow Q Occupancy; VN0 NCB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN0 NCS Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN0 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN0 WB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN1_NCS Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN1_NCB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN1 RSP Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M3UPI"
},
{
"BriefDescription": "BL Flow Q Occupancy; VN1 WB Messages",
+ "Counter": "0",
"EventCode": "0x1D",
"EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M3UPI"
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x2",
@@ -8368,8 +10254,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN0 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x8",
@@ -8377,8 +10265,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x1",
@@ -8386,8 +10276,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x20",
@@ -8395,8 +10287,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN1 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x80",
@@ -8404,8 +10298,10 @@
},
{
"BriefDescription": "Speculative ARB for BL - New Message; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x38",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
"UMask": "0x10",
@@ -8413,8 +10309,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x4",
@@ -8422,8 +10320,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x8",
@@ -8431,8 +10331,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x1",
@@ -8440,8 +10342,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x2",
@@ -8449,8 +10353,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCS Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x40",
@@ -8458,8 +10364,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x80",
@@ -8467,8 +10375,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x10",
@@ -8476,8 +10386,10 @@
},
{
"BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 WB Messages",
+ "Counter": "0,1,2",
"EventCode": "0x37",
"EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
"UMask": "0x20",
@@ -8485,8 +10397,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8494,8 +10408,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8503,8 +10419,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8512,8 +10430,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8521,8 +10441,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x9D",
"EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8530,8 +10452,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8539,8 +10463,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8548,8 +10474,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8557,8 +10485,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8566,8 +10496,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8575,8 +10507,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9F",
"EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -8584,8 +10518,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8593,8 +10529,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8602,8 +10540,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8611,8 +10551,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8620,8 +10562,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8629,8 +10573,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x96",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8638,8 +10584,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8647,8 +10595,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8656,8 +10606,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8665,8 +10617,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8674,8 +10628,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8683,8 +10639,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x97",
"EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8692,8 +10650,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8701,8 +10661,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8710,8 +10672,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8719,8 +10683,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8728,8 +10694,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8737,8 +10705,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x95",
"EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8746,8 +10716,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -8755,8 +10727,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x20",
@@ -8764,8 +10738,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -8773,8 +10749,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -8782,8 +10760,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -8791,8 +10771,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x99",
"EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -8800,8 +10782,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -8809,8 +10793,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -8818,8 +10804,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8827,8 +10815,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8836,8 +10826,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8845,8 +10837,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x94",
"EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8854,8 +10848,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -8863,8 +10859,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -8872,8 +10870,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -8881,8 +10881,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2",
"EventCode": "0x9B",
"EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV_BNC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -8890,8 +10892,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8899,8 +10903,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8908,8 +10914,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8917,8 +10925,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -8926,8 +10936,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8935,8 +10947,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9C",
"EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8944,8 +10958,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8953,8 +10969,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8962,8 +10980,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8971,8 +10991,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -8980,8 +11002,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8989,8 +11013,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8998,8 +11024,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2",
"EventCode": "0x9E",
"EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -9007,8 +11035,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9016,8 +11046,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9025,8 +11057,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9034,8 +11068,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9043,8 +11079,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9052,8 +11090,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9061,8 +11101,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2",
"EventCode": "0x92",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9070,8 +11112,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9079,8 +11123,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9088,8 +11134,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9097,8 +11145,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9106,8 +11156,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9115,8 +11167,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9124,8 +11178,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2",
"EventCode": "0x93",
"EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9133,8 +11189,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9142,8 +11200,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9151,8 +11211,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9160,8 +11222,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9169,8 +11233,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9178,8 +11244,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9187,8 +11255,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2",
"EventCode": "0x91",
"EventName": "UNC_M3UPI_TxR_VERT_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9196,8 +11266,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -9205,8 +11277,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -9214,8 +11288,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -9223,8 +11299,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -9232,8 +11310,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -9241,8 +11321,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -9250,8 +11332,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2",
"EventCode": "0x98",
"EventName": "UNC_M3UPI_TxR_VERT_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -9259,8 +11343,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -9268,8 +11354,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -9277,8 +11365,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -9286,8 +11376,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -9295,8 +11387,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -9304,8 +11398,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -9313,8 +11409,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2",
"EventCode": "0x90",
"EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -9322,8 +11420,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -9331,8 +11431,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -9340,8 +11442,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -9349,8 +11453,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -9358,8 +11464,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -9367,8 +11475,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -9376,8 +11486,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2",
"EventCode": "0x9A",
"EventName": "UNC_M3UPI_TxR_VERT_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -9385,8 +11497,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x2",
@@ -9394,8 +11508,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x8",
@@ -9403,8 +11519,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x4",
@@ -9412,8 +11530,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x10",
@@ -9421,8 +11541,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x40",
@@ -9430,8 +11552,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x20",
@@ -9439,8 +11563,10 @@
},
{
"BriefDescription": "UPI0 AD Credits Empty; VNA",
+ "Counter": "0,1,2",
"EventCode": "0x20",
"EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPIs on the AD Ring",
"UMask": "0x1",
@@ -9448,8 +11574,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x4",
@@ -9457,8 +11585,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x2",
@@ -9466,8 +11596,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x8",
@@ -9475,8 +11607,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x20",
@@ -9484,8 +11618,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x10",
@@ -9493,8 +11629,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x40",
@@ -9502,8 +11640,10 @@
},
{
"BriefDescription": "UPI0 BL Credits Empty; VNA",
+ "Counter": "0,1,2",
"EventCode": "0x21",
"EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
"UMask": "0x1",
@@ -9511,6 +11651,7 @@
},
{
"BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
+ "Counter": "0,1,2",
"EventCode": "0x29",
"EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
"PerPkg": "1",
@@ -9519,8 +11660,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9528,8 +11671,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9537,8 +11682,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9546,8 +11693,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA6",
"EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9555,8 +11704,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA8",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9564,8 +11715,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA8",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9573,8 +11726,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2",
"EventCode": "0xA8",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9582,8 +11737,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xA8",
"EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9591,8 +11748,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -9600,8 +11759,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -9609,8 +11770,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -9618,8 +11781,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2",
"EventCode": "0xAA",
"EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -9627,8 +11792,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2",
"EventCode": "0xAC",
"EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -9636,8 +11803,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2",
"EventCode": "0xAC",
"EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -9645,8 +11814,10 @@
},
{
"BriefDescription": "VN0 Credit Used; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9654,8 +11825,10 @@
},
{
"BriefDescription": "VN0 Credit Used; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9663,8 +11836,10 @@
},
{
"BriefDescription": "VN0 Credit Used; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9672,8 +11847,10 @@
},
{
"BriefDescription": "VN0 Credit Used; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9681,8 +11858,10 @@
},
{
"BriefDescription": "VN0 Credit Used; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9690,8 +11869,10 @@
},
{
"BriefDescription": "VN0 Credit Used; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5C",
"EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9699,8 +11880,10 @@
},
{
"BriefDescription": "VN0 No Credits; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9708,8 +11891,10 @@
},
{
"BriefDescription": "VN0 No Credits; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9717,8 +11902,10 @@
},
{
"BriefDescription": "VN0 No Credits; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9726,8 +11913,10 @@
},
{
"BriefDescription": "VN0 No Credits; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9735,8 +11924,10 @@
},
{
"BriefDescription": "VN0 No Credits; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9744,8 +11935,10 @@
},
{
"BriefDescription": "VN0 No Credits; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5E",
"EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN0 Credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9753,8 +11946,10 @@
},
{
"BriefDescription": "VN1 Credit Used; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9762,8 +11957,10 @@
},
{
"BriefDescription": "VN1 Credit Used; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9771,8 +11968,10 @@
},
{
"BriefDescription": "VN1 Credit Used; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9780,8 +11979,10 @@
},
{
"BriefDescription": "VN1 Credit Used; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9789,8 +11990,10 @@
},
{
"BriefDescription": "VN1 Credit Used; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9798,8 +12001,10 @@
},
{
"BriefDescription": "VN1 Credit Used; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5D",
"EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9807,8 +12012,10 @@
},
{
"BriefDescription": "VN1 No Credits; WB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
"UMask": "0x10",
@@ -9816,8 +12023,10 @@
},
{
"BriefDescription": "VN1 No Credits; NCB on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x20",
@@ -9825,8 +12034,10 @@
},
{
"BriefDescription": "VN1 No Credits; REQ on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
@@ -9834,8 +12045,10 @@
},
{
"BriefDescription": "VN1 No Credits; RSP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
@@ -9843,8 +12056,10 @@
},
{
"BriefDescription": "VN1 No Credits; SNP on AD",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
"UMask": "0x2",
@@ -9852,8 +12067,10 @@
},
{
"BriefDescription": "VN1 No Credits; RSP on BL",
+ "Counter": "0,1,2",
"EventCode": "0x5F",
"EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of Cycles there were no VN1 Credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x8",
@@ -9861,15 +12078,18 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_TxC_BL.DRS_UPI",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x40",
"EventName": "UNC_NoUnit_TxC_BL.DRS_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Clocks of the Intel(R) Ultra Path Interconnect (UPI)",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_UPI_CLOCKTICKS",
"PerPkg": "1",
@@ -9878,6 +12098,7 @@
},
{
"BriefDescription": "Data Response packets that go direct to core",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
"PerPkg": "1",
@@ -9887,6 +12108,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
@@ -9896,6 +12118,7 @@
},
{
"BriefDescription": "Data Response packets that go direct to Intel(R) UPI",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
"PerPkg": "1",
@@ -9905,70 +12128,87 @@
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles Intel(R) UPI is in L1 power mode (shutdown)",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_UPI_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -9977,164 +12217,205 @@
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req Nack",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_UPI_POWER_L1_NACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "L1 Req (same as L1 Ack).",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_UPI_POWER_L1_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles the Rx of the Intel(R) UPI is in L0p power mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -10143,16 +12424,20 @@
},
{
"BriefDescription": "Cycles in L0. Receive side.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCB",
"UMask": "0xe",
@@ -10160,8 +12445,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCB",
"UMask": "0x10e",
@@ -10169,8 +12456,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCS",
"UMask": "0xf",
@@ -10178,8 +12467,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCS",
"UMask": "0x10f",
@@ -10187,8 +12478,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQ Message Class",
"UMask": "0x8",
@@ -10196,8 +12489,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Request Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match REQ Opcodes - Specified in Umask[7:4]",
"UMask": "0x108",
@@ -10205,24 +12500,30 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1aa",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12a",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0xc",
@@ -10230,8 +12531,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0x10c",
@@ -10239,8 +12542,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - RSP",
"UMask": "0xa",
@@ -10248,8 +12553,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - RSP",
"UMask": "0x10a",
@@ -10257,8 +12564,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "SNP Message Class",
"UMask": "0x9",
@@ -10266,8 +12575,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Snoop Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match SNP Opcodes - Specified in Umask[7:4]",
"UMask": "0x109",
@@ -10275,8 +12586,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0xd",
@@ -10284,8 +12597,10 @@
},
{
"BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0x10d",
@@ -10293,6 +12608,7 @@
},
{
"BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
"PerPkg": "1",
@@ -10302,6 +12618,7 @@
},
{
"BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
"PerPkg": "1",
@@ -10311,6 +12628,7 @@
},
{
"BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
"PerPkg": "1",
@@ -10320,46 +12638,57 @@
},
{
"BriefDescription": "CRC Errors Detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
"Unit": "UPI"
},
{
"BriefDescription": "LLR Requests Sent",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs.",
"Unit": "UPI"
},
{
"BriefDescription": "VN0 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VN1 Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "UPI"
},
{
"BriefDescription": "Valid data FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -10369,6 +12698,7 @@
},
{
"BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -10378,8 +12708,10 @@
},
{
"BriefDescription": "Valid Flits Received; Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
"UMask": "0x8",
@@ -10387,8 +12719,10 @@
},
{
"BriefDescription": "Valid Flits Received; Idle",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).",
"UMask": "0x47",
@@ -10396,8 +12730,10 @@
},
{
"BriefDescription": "Valid Flits Received; LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -10405,8 +12741,10 @@
},
{
"BriefDescription": "Valid Flits Received; LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -10414,6 +12752,7 @@
},
{
"BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -10423,6 +12762,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.NULL",
@@ -10432,8 +12772,10 @@
},
{
"BriefDescription": "Valid Flits Received; Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -10441,17 +12783,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.PROT_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Received; Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -10459,8 +12805,10 @@
},
{
"BriefDescription": "Valid Flits Received; Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -10468,8 +12816,10 @@
},
{
"BriefDescription": "Valid Flits Received; Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -10477,62 +12827,76 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.RSP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x5",
"EventName": "UNC_UPI_RxL_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "UPI"
},
{
"BriefDescription": "RxQ Flit Buffer Allocations; Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x1",
@@ -10540,8 +12904,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations; Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x2",
@@ -10549,8 +12915,10 @@
},
{
"BriefDescription": "RxQ Flit Buffer Allocations; Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"UMask": "0x4",
@@ -10558,8 +12926,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets; Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x1",
@@ -10567,8 +12937,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets; Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x2",
@@ -10576,8 +12948,10 @@
},
{
"BriefDescription": "RxQ Occupancy - All Packets; Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"UMask": "0x4",
@@ -10585,118 +12959,147 @@
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in which the Tx of the Intel(R) Ultra Path Interconnect (UPI) is in L0p power mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -10705,30 +13108,38 @@
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "Cycles in L0. Transmit side.",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCB",
"UMask": "0xe",
@@ -10736,8 +13147,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCB",
"UMask": "0x10e",
@@ -10745,8 +13158,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCS",
"UMask": "0xf",
@@ -10754,8 +13169,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - NCS",
"UMask": "0x10f",
@@ -10763,8 +13180,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "REQ Message Class",
"UMask": "0x8",
@@ -10772,8 +13191,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Request Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match REQ Opcodes - Specified in Umask[7:4]",
"UMask": "0x108",
@@ -10781,24 +13202,30 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1aa",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - Invalid",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12a",
"Unit": "UPI"
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0xc",
@@ -10806,8 +13233,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0x10c",
@@ -10815,8 +13244,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - RSP",
"UMask": "0xa",
@@ -10824,8 +13255,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class - RSP",
"UMask": "0x10a",
@@ -10833,8 +13266,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "SNP Message Class",
"UMask": "0x9",
@@ -10842,8 +13277,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Snoop Opcode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match SNP Opcodes - Specified in Umask[7:4]",
"UMask": "0x109",
@@ -10851,8 +13288,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0xd",
@@ -10860,8 +13299,10 @@
},
{
"BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Match Message Class -WB",
"UMask": "0x10d",
@@ -10869,6 +13310,7 @@
},
{
"BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_UPI_TxL_BYPASSED",
"PerPkg": "1",
@@ -10877,6 +13319,7 @@
},
{
"BriefDescription": "Valid data FLITs transmitted via any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
"PerPkg": "1",
@@ -10886,6 +13329,7 @@
},
{
"BriefDescription": "Null FLITs transmitted from any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
"PerPkg": "1",
@@ -10895,6 +13339,7 @@
},
{
"BriefDescription": "Valid Flits Sent; Data",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.DATA",
"PerPkg": "1",
@@ -10904,6 +13349,7 @@
},
{
"BriefDescription": "Idle FLITs transmitted",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.IDLE",
"PerPkg": "1",
@@ -10913,8 +13359,10 @@
},
{
"BriefDescription": "Valid Flits Sent; LLCRD Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
"UMask": "0x10",
@@ -10922,8 +13370,10 @@
},
{
"BriefDescription": "Valid Flits Sent; LLCTRL",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
"UMask": "0x40",
@@ -10931,6 +13381,7 @@
},
{
"BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
"PerPkg": "1",
@@ -10940,6 +13391,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.ALL_NULL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.NULL",
@@ -10949,8 +13401,10 @@
},
{
"BriefDescription": "Valid Flits Sent; Protocol Header",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
"UMask": "0x80",
@@ -10958,17 +13412,21 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.PROT_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UPI"
},
{
"BriefDescription": "Valid Flits Sent; Slot 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 0 - Other mask bits determine types of headers to count.",
"UMask": "0x1",
@@ -10976,8 +13434,10 @@
},
{
"BriefDescription": "Valid Flits Sent; Slot 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 1 - Other mask bits determine types of headers to count.",
"UMask": "0x2",
@@ -10985,8 +13445,10 @@
},
{
"BriefDescription": "Valid Flits Sent; Slot 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 2 - Other mask bits determine types of headers to count.",
"UMask": "0x4",
@@ -10994,157 +13456,195 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.DATA_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.DUAL_SLOT_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.LOC",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.NON_DATA_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.REM",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_DATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_NODATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.SGL_SLOT_HDR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "UPI"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x4",
"EventName": "UNC_UPI_TxL_HDR_MATCH.WB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_UPI_TxL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"Unit": "UPI"
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
"Unit": "UPI"
},
{
"BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UPI"
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
"Unit": "UPI"
},
{
"BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_U_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "UBOX"
},
{
"BriefDescription": "Message Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x8",
@@ -11152,8 +13652,10 @@
},
{
"BriefDescription": "Message Received",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x10",
@@ -11161,8 +13663,10 @@
},
{
"BriefDescription": "Message Received; IPI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.; Inter Processor Interrupts",
"UMask": "0x4",
@@ -11170,8 +13674,10 @@
},
{
"BriefDescription": "Message Received; MSI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.; Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
"UMask": "0x2",
@@ -11179,8 +13685,10 @@
},
{
"BriefDescription": "Message Received; VLW",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x1",
@@ -11188,16 +13696,20 @@
},
{
"BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
"EventCode": "0x44",
"EventName": "UNC_U_LOCK_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
"Unit": "UBOX"
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PHOLD cycles.",
"UMask": "0x1",
@@ -11205,38 +13717,47 @@
},
{
"BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number outstanding register requests within message channel tracker",
"Unit": "UBOX"
},
{
"BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UPI_DATA_BANDWIDTH_TX",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json
index 743c91f3d2f0..bce46dd4f395 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "LLC_MISSES.PCIE_READ",
"FCMask": "0x07",
@@ -16,6 +17,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "LLC_MISSES.PCIE_WRITE",
"FCMask": "0x07",
@@ -31,6 +33,7 @@
},
{
"BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_IIO_CLOCKTICKS",
"PerPkg": "1",
@@ -39,6 +42,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
"FCMask": "0x4",
@@ -49,6 +53,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
"FCMask": "0x4",
@@ -59,6 +64,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
"FCMask": "0x4",
@@ -69,6 +75,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
"FCMask": "0x4",
@@ -79,6 +86,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
"FCMask": "0x4",
@@ -89,8 +97,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts; Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x01",
@@ -99,8 +109,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts; Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x02",
@@ -109,8 +121,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts; Port 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x04",
@@ -119,8 +133,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts; Port 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x08",
@@ -129,6 +145,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
"FCMask": "0x04",
@@ -138,6 +155,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
"FCMask": "0x04",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
"FCMask": "0x04",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
"FCMask": "0x04",
@@ -165,6 +185,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
"FCMask": "0x04",
@@ -174,8 +195,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -185,8 +208,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -196,8 +221,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -207,8 +234,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -218,8 +247,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -229,8 +260,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -240,8 +273,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -251,8 +286,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -262,8 +299,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -273,8 +312,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -284,8 +325,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -295,8 +338,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -306,8 +351,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -317,8 +364,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -328,8 +377,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -339,8 +390,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -350,8 +403,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -361,8 +416,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -372,8 +429,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -383,8 +442,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -394,8 +455,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -405,8 +468,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -416,8 +481,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -427,8 +494,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -438,6 +507,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -449,6 +519,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -460,6 +531,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -471,6 +543,7 @@
},
{
"BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -482,8 +555,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -493,8 +568,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -504,6 +581,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -515,6 +593,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -526,6 +605,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -537,6 +617,7 @@
},
{
"BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -548,8 +629,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -559,8 +642,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -570,6 +655,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
"FCMask": "0x07",
@@ -581,6 +667,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
"FCMask": "0x07",
@@ -592,6 +679,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
"FCMask": "0x07",
@@ -603,6 +691,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
"FCMask": "0x07",
@@ -614,8 +703,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -625,8 +716,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -636,6 +729,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
@@ -647,6 +741,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
@@ -658,6 +753,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
@@ -669,6 +765,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
@@ -680,8 +777,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -691,8 +790,10 @@
},
{
"BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -702,8 +803,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -713,8 +816,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -724,8 +829,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -735,8 +842,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -746,8 +855,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -757,8 +868,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -768,8 +881,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -779,8 +894,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -790,8 +907,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -801,8 +920,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -812,6 +933,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -823,6 +945,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -834,6 +957,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -845,6 +969,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -856,8 +981,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -867,8 +994,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -878,6 +1007,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -889,6 +1019,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -900,6 +1031,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -911,6 +1043,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -922,8 +1055,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -933,8 +1068,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -944,8 +1081,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -955,8 +1094,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -966,8 +1107,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -977,8 +1120,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -988,8 +1133,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -999,8 +1146,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1010,6 +1159,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
"FCMask": "0x07",
@@ -1021,6 +1171,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
"FCMask": "0x07",
@@ -1032,6 +1183,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
"FCMask": "0x07",
@@ -1043,6 +1195,7 @@
},
{
"BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
"FCMask": "0x07",
@@ -1054,8 +1207,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1065,8 +1220,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1076,6 +1233,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
@@ -1087,6 +1245,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
@@ -1098,6 +1257,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
@@ -1109,6 +1269,7 @@
},
{
"BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
@@ -1120,8 +1281,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1131,8 +1294,10 @@
},
{
"BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1142,29 +1307,37 @@
},
{
"BriefDescription": "Num Link Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_IIO_LINK_NUM_CORR_ERR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "Num Link Retries",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_IIO_LINK_NUM_RETRIES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "Number packets that passed the Mask/Match Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_IIO_MASK_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x1",
@@ -1172,8 +1345,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x8",
@@ -1181,8 +1356,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x4",
@@ -1190,8 +1367,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus; PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x2",
@@ -1199,8 +1378,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x10",
@@ -1208,8 +1389,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if all bits specified by mask match",
"UMask": "0x20",
@@ -1217,8 +1400,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x1",
@@ -1226,8 +1411,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x8",
@@ -1235,8 +1422,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x4",
@@ -1244,8 +1433,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x2",
@@ -1253,8 +1444,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x10",
@@ -1262,8 +1455,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Asserted if any bits specified by mask match",
"UMask": "0x20",
@@ -1271,15 +1466,19 @@
},
{
"BriefDescription": "Counting disabled",
+ "Counter": "0,1,2,3",
"EventName": "UNC_IIO_NOTHING",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1288,9 +1487,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1299,9 +1500,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1310,9 +1513,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1321,9 +1526,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1332,9 +1539,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1343,9 +1552,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1354,9 +1565,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1365,9 +1578,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1376,9 +1591,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1387,6 +1604,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART0",
@@ -1398,6 +1616,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART1",
@@ -1409,6 +1628,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART2",
@@ -1420,6 +1640,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART3",
@@ -1431,9 +1652,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1442,9 +1665,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1453,6 +1678,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART0",
@@ -1464,6 +1690,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART1",
@@ -1475,6 +1702,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART2",
@@ -1486,6 +1714,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART3",
@@ -1497,9 +1726,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1508,9 +1739,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1519,9 +1752,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1530,9 +1765,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1541,9 +1778,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1552,9 +1791,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1563,9 +1804,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1574,9 +1817,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1585,9 +1830,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1596,9 +1843,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1607,9 +1856,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1618,9 +1869,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1629,9 +1882,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1640,9 +1895,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1651,9 +1908,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1662,9 +1921,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1673,9 +1934,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1684,9 +1947,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1695,9 +1960,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1706,9 +1973,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x83",
"EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1717,9 +1986,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1728,9 +1999,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1739,9 +2012,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1750,9 +2025,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1761,9 +2038,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1772,9 +2051,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1783,9 +2064,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1794,9 +2077,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1805,9 +2090,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1816,9 +2103,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1827,9 +2116,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1838,9 +2129,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1849,9 +2142,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1860,9 +2155,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1871,9 +2168,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1882,9 +2181,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1893,9 +2194,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1904,9 +2207,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1915,9 +2220,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1926,9 +2233,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -1937,9 +2246,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -1948,9 +2259,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -1959,9 +2272,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1970,9 +2285,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1981,9 +2298,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -1992,9 +2311,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2003,9 +2324,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2014,9 +2337,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2025,9 +2350,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2036,9 +2363,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2047,9 +2376,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2058,9 +2389,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2069,9 +2402,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2080,9 +2415,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2091,9 +2428,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2102,9 +2441,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2113,9 +2454,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2124,9 +2467,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2135,9 +2480,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2146,9 +2493,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2157,9 +2506,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2168,9 +2519,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2179,9 +2532,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2190,9 +2545,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2201,9 +2558,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2212,9 +2571,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2223,9 +2584,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2234,9 +2597,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Counter": "2,3",
"Deprecated": "1",
"EventCode": "0xC0",
"EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2245,17 +2610,21 @@
},
{
"BriefDescription": "Symbol Times on Link",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_IIO_SYMBOL_TIMES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS",
"Unit": "IIO"
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2264,9 +2633,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2275,9 +2646,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2286,9 +2659,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2297,9 +2672,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2308,9 +2685,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2319,9 +2698,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2330,9 +2711,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2341,9 +2724,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2352,9 +2737,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2363,9 +2750,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2374,9 +2763,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2385,9 +2776,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2396,9 +2789,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2407,9 +2802,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2418,9 +2815,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2429,9 +2828,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2440,9 +2841,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2451,9 +2854,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2462,9 +2867,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2473,9 +2880,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2484,9 +2893,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2495,9 +2906,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2506,9 +2919,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2517,9 +2932,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2528,9 +2945,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2539,9 +2958,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2550,9 +2971,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.MSG.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2561,9 +2984,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2572,9 +2997,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2583,9 +3010,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2594,9 +3023,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2605,9 +3036,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2616,9 +3049,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2627,9 +3062,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2638,9 +3075,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2649,9 +3088,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2660,9 +3101,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2671,9 +3114,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2682,9 +3127,11 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2693,9 +3140,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2704,9 +3153,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2715,9 +3166,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2726,9 +3179,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2737,9 +3192,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2748,9 +3205,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2759,9 +3218,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2770,9 +3231,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2781,9 +3244,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2792,9 +3257,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2803,9 +3270,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2814,9 +3283,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2825,9 +3296,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2836,9 +3309,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2847,9 +3322,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2858,9 +3335,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2869,9 +3348,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2880,9 +3361,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2891,9 +3374,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2902,9 +3387,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2913,9 +3400,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2924,9 +3413,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2935,9 +3426,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2946,9 +3439,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -2957,9 +3452,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -2968,9 +3465,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -2979,9 +3478,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -2990,9 +3491,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3001,9 +3504,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3012,9 +3517,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -3023,9 +3530,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -3034,9 +3543,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -3045,9 +3556,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -3056,9 +3569,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3067,9 +3582,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3078,9 +3595,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -3089,9 +3608,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -3100,9 +3621,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -3111,9 +3634,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -3122,9 +3647,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3133,9 +3660,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3144,9 +3673,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x1",
@@ -3155,9 +3686,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x2",
@@ -3166,9 +3699,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x4",
@@ -3177,9 +3712,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x8",
@@ -3188,9 +3725,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3199,9 +3738,11 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3210,8 +3751,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3221,8 +3764,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3232,8 +3777,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3243,8 +3790,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3254,8 +3803,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3265,8 +3816,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3276,8 +3829,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3287,8 +3842,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3298,8 +3855,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3309,8 +3868,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3320,8 +3881,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3331,8 +3894,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3342,8 +3907,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3353,8 +3920,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3364,8 +3933,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3375,8 +3946,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3386,8 +3959,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3397,8 +3972,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3408,8 +3985,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3419,8 +3998,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3430,8 +4011,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3441,8 +4024,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3452,8 +4037,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3463,8 +4050,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3474,6 +4063,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -3485,6 +4075,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -3496,6 +4087,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -3507,6 +4099,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -3518,8 +4111,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3529,8 +4124,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3540,6 +4137,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -3551,6 +4149,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -3562,6 +4161,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -3573,6 +4173,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -3584,8 +4185,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3595,8 +4198,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3606,6 +4211,7 @@
},
{
"BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
"FCMask": "0x07",
@@ -3617,6 +4223,7 @@
},
{
"BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
"FCMask": "0x07",
@@ -3628,6 +4235,7 @@
},
{
"BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
"FCMask": "0x07",
@@ -3639,6 +4247,7 @@
},
{
"BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
"FCMask": "0x07",
@@ -3650,8 +4259,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3661,8 +4272,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3672,6 +4285,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
@@ -3683,6 +4297,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
@@ -3694,6 +4309,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
@@ -3705,6 +4321,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
@@ -3716,8 +4333,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3727,8 +4346,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3738,8 +4359,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3749,8 +4372,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3760,8 +4385,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3771,8 +4398,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3782,8 +4411,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3793,8 +4424,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3804,8 +4437,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3815,8 +4450,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3826,8 +4463,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3837,8 +4476,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3848,6 +4489,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -3859,6 +4501,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -3870,6 +4513,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -3881,6 +4525,7 @@
},
{
"BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -3892,8 +4537,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3903,8 +4550,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3914,6 +4563,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -3925,6 +4575,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -3936,6 +4587,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -3947,6 +4599,7 @@
},
{
"BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -3958,8 +4611,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3969,8 +4624,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3980,8 +4637,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3991,8 +4650,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -4002,8 +4663,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -4013,8 +4676,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -4024,8 +4689,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4035,8 +4702,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4046,6 +4715,7 @@
},
{
"BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
"FCMask": "0x07",
@@ -4057,6 +4727,7 @@
},
{
"BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
"FCMask": "0x07",
@@ -4068,6 +4739,7 @@
},
{
"BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
"FCMask": "0x07",
@@ -4079,6 +4751,7 @@
},
{
"BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
"FCMask": "0x07",
@@ -4090,8 +4763,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4101,8 +4776,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4112,6 +4789,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
@@ -4123,6 +4801,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
@@ -4134,6 +4813,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
@@ -4145,6 +4825,7 @@
},
{
"BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
@@ -4156,8 +4837,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4167,8 +4850,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4178,72 +4863,90 @@
},
{
"BriefDescription": "VTd Access; context cache miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.CTXT_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; L1 miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.L1_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; L2 miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.L2_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; L3 miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.L3_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; Vtd hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.L4_PAGE_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; TLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.TLB1_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; TLB is full",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.TLB_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Access; TLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_VTD_ACCESS.TLB_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IIO"
},
{
"BriefDescription": "VTd Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_VTD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
}
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
index 7a40aa0f1018..96cdb52f2778 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
@@ -21,8 +23,10 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.BYP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x8",
@@ -30,8 +34,10 @@
},
{
"BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x1",
@@ -39,6 +45,7 @@
},
{
"BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.WR",
"PerPkg": "1",
@@ -48,30 +55,37 @@
},
{
"BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.ACT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.CAS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M_BYP_CMDS.PRE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "All DRAM CAS Commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -81,6 +95,7 @@
},
{
"BriefDescription": "All DRAM Read CAS Commands issued (including underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -90,14 +105,17 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Read ISOCH Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "iMC"
},
{
"BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
@@ -107,14 +125,17 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
@@ -124,14 +145,17 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "All DRAM Write CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -141,16 +165,20 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Write ISOCH Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
"UMask": "0x8",
@@ -158,6 +186,7 @@
},
{
"BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
@@ -167,6 +196,7 @@
},
{
"BriefDescription": "Memory controller clock ticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts clockticks of the fixed frequency clock of the memory controller using one of the programmable counters.",
@@ -174,23 +204,29 @@
},
{
"BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_M_CLOCKTICKS_F",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that the precharge all command was sent.",
"Unit": "iMC"
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of refreshes issued.",
"UMask": "0x4",
@@ -198,8 +234,10 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of refreshes issued.",
"UMask": "0x2",
@@ -207,16 +245,20 @@
},
{
"BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
"UMask": "0x8",
@@ -224,8 +266,10 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
"UMask": "0x4",
@@ -233,8 +277,10 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
"UMask": "0x1",
@@ -242,8 +288,10 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
"UMask": "0x2",
@@ -251,14 +299,17 @@
},
{
"BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100",
@@ -269,8 +320,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x1",
@@ -278,8 +331,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x2",
@@ -287,8 +342,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x4",
@@ -296,8 +353,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x8",
@@ -305,8 +364,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x10",
@@ -314,8 +375,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x20",
@@ -323,8 +386,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x40",
@@ -332,8 +397,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x80",
@@ -341,21 +408,26 @@
},
{
"BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M_POWER_PCU_THROTTLING",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles Memory is in self refresh power mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100",
@@ -366,8 +438,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
"UMask": "0x1",
@@ -375,8 +449,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
@@ -384,8 +460,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x4",
@@ -393,8 +471,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x8",
@@ -402,8 +482,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x10",
@@ -411,8 +493,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x20",
@@ -420,8 +504,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x40",
@@ -429,8 +515,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x80",
@@ -438,8 +526,10 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
"UMask": "0x1",
@@ -447,8 +537,10 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
"UMask": "0x2",
@@ -456,8 +548,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.BYP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x10",
@@ -465,8 +559,10 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
"UMask": "0x2",
@@ -474,6 +570,7 @@
},
{
"BriefDescription": "Pre-charges due to page misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
@@ -483,6 +580,7 @@
},
{
"BriefDescription": "Pre-charge for reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -492,8 +590,10 @@
},
{
"BriefDescription": "Pre-charge for writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x8",
@@ -501,1390 +601,1739 @@
},
{
"BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M_RD_CAS_RANK2.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M_RD_CAS_RANK3.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -1893,6 +2342,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
@@ -1901,46 +2351,57 @@
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
@@ -1949,6 +2410,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
@@ -1957,1359 +2419,1701 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
},
{
"BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_M_WRONG_MM",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M_WR_CAS_RANK2.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M_WR_CAS_RANK3.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBC",
"EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBE",
"EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xb",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xd",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xe",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x5",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x6",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x9",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x12",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x13",
"Unit": "iMC"
},
{
"BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
"EventCode": "0xBF",
"EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x14",
"Unit": "iMC"
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json
index ceef46046488..809b86dde933 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json
@@ -1,147 +1,185 @@
[
{
"BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent in phase-shedding power state 0",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent in phase-shedding power state 1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent in phase-shedding power state 2",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent in phase-shedding power state 3",
"Unit": "PCU"
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_MCP_PROCHOT_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_MCP_PROCHOT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0x40",
@@ -149,8 +187,10 @@
},
{
"BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0x80",
@@ -158,8 +198,10 @@
},
{
"BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0xc0",
@@ -167,32 +209,40 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
}
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
index 73feadaf7674..ad33fff57c03 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -66,6 +74,7 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
@@ -74,6 +83,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -82,6 +92,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -115,6 +129,7 @@
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -131,6 +147,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
@@ -139,6 +156,7 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
@@ -147,6 +165,7 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
@@ -155,6 +174,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -162,6 +182,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -171,6 +192,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -179,6 +201,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts completed page walks (1G page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -187,6 +210,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -195,6 +219,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -203,6 +228,7 @@
},
{
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -211,6 +237,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -219,6 +246,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
index c6be60584522..7882dca9d5e1 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of core requests (demand and L1 prefetchers) rejected by the L2 queue (L2Q) due to a full condition.",
+ "Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "CORE_REJECT_L2Q.ANY",
"PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2 queue (L2Q) due to a full or nearly full condition, which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the External Queue (XQ), but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a cores dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event). Counts on a per core basis.",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "DL1.DIRTY_EVICTION",
"PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition.",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "L2_REJECT_XQ.ANY",
"PublicDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBL (L2 misses) and WOB (L2 write-back victims).",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Counts the total number of L2 Cache accesses. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PublicDescription": "Counts the total number of L2 Cache Accesses, includes hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only. Counts on a per core basis.",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Counts the number of L2 Cache accesses that resulted in a hit. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.HIT",
"PublicDescription": "Counts the number of L2 Cache accesses that resulted in a hit from a front door request only (does not include rejects or recycles), Counts on a per core basis.",
@@ -38,6 +43,7 @@
},
{
"BriefDescription": "Counts the number of L2 Cache accesses that resulted in a miss. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.MISS",
"PublicDescription": "Counts the number of L2 Cache accesses that resulted in a miss from a front door request only (does not include rejects or recycles). Counts on a per core basis.",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Counts the number of L2 Cache accesses that miss the L2 and get rejected. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.REJECTS",
"PublicDescription": "Counts the number of L2 Cache accesses that miss the L2 and get BBL reject short and long rejects (includes those counted in L2_reject_XQ.any). Counts on a per core basis.",
@@ -54,6 +61,7 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
@@ -62,6 +70,7 @@
},
{
"BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
@@ -70,6 +79,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
@@ -78,6 +88,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
@@ -86,6 +97,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
@@ -94,6 +106,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
@@ -102,6 +115,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD",
"SampleAfterValue": "200003",
@@ -109,6 +123,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
"SampleAfterValue": "200003",
@@ -116,6 +131,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
"SampleAfterValue": "200003",
@@ -123,6 +139,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the LLC or other core with HITE/F/M.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
"PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
@@ -131,6 +148,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a store buffer being full.",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.STORE_BUFFER_FULL",
"SampleAfterValue": "200003",
@@ -138,6 +156,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
@@ -147,6 +166,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core or module.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
@@ -156,6 +176,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L1 data cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
@@ -165,6 +186,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that miss in the L1 data cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
@@ -174,6 +196,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
@@ -183,6 +206,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that miss in the L2 cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
@@ -192,6 +216,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
@@ -201,6 +226,7 @@
},
{
"BriefDescription": "Counts the number of memory uops retired.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL",
@@ -211,6 +237,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
@@ -221,6 +248,7 @@
},
{
"BriefDescription": "Counts the number of store uops retired.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
@@ -231,6 +259,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that performed one or more locks.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
@@ -240,6 +269,7 @@
},
{
"BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
@@ -249,6 +279,7 @@
},
{
"BriefDescription": "Counts the number of retired split load uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
@@ -258,6 +289,7 @@
},
{
"BriefDescription": "Counts the number of retired split store uops.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
@@ -267,6 +299,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -276,6 +309,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -285,6 +319,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -303,6 +339,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -312,6 +349,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -321,6 +359,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -330,6 +369,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -339,6 +379,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -348,6 +389,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -357,6 +399,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -366,6 +409,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -375,6 +419,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -384,6 +429,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -393,6 +439,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -402,6 +449,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -411,6 +459,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -420,6 +469,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -429,6 +479,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -438,6 +489,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
@@ -448,6 +500,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
@@ -458,6 +511,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
@@ -468,6 +522,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -478,6 +533,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
@@ -488,6 +544,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
@@ -498,6 +555,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -507,6 +565,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -516,6 +575,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -525,6 +585,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -534,6 +595,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -543,6 +605,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -552,6 +615,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.FULL_STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -561,6 +625,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -570,6 +635,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -579,6 +645,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -588,6 +655,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -597,6 +665,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -606,6 +675,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -615,6 +685,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -624,6 +695,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -633,6 +705,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -642,6 +715,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -651,6 +725,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -660,6 +735,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -669,6 +745,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -678,6 +755,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -687,6 +765,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -696,6 +775,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -705,6 +785,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -714,6 +795,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -723,6 +805,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -732,6 +815,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L1WB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -741,6 +825,7 @@
},
{
"BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L2WB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -750,6 +835,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -759,6 +845,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -768,6 +855,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -777,6 +865,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -786,6 +875,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -795,6 +885,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -804,6 +895,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -813,6 +905,7 @@
},
{
"BriefDescription": "Counts streaming stores that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -822,6 +915,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -831,6 +925,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -840,6 +935,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -849,6 +945,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -858,6 +955,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -867,6 +965,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
@@ -876,6 +975,7 @@
},
{
"BriefDescription": "Counts uncached memory writes that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -885,6 +985,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/counter.json b/tools/perf/pmu-events/arch/x86/snowridgex/counter.json
new file mode 100644
index 000000000000..5ae30dc3c1ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/counter.json
@@ -0,0 +1,47 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "3",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "CHA",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IIO",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "IRP",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "1",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2M",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "M2PCIe",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "PCU",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "4"
+ },
+ {
+ "Unit": "UBOX",
+ "CountersNumFixed": 1,
+ "CountersNumGeneric": "2"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json b/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
index 88522244b760..79a4beba4b78 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cycles the floating point divider is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.FPDIV",
"PublicDescription": "Counts the number of cycles the floating point divider is busy. Does not imply a stall waiting for the divider.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json b/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json
index 5ba998e06592..6d131ed90242 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of BACLEARS due to a conditional jump.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.COND",
"SampleAfterValue": "200003",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of BACLEARS due to an indirect branch.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.INDIRECT",
"SampleAfterValue": "200003",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Counts the number of BACLEARS due to a return branch.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.RETURN",
"SampleAfterValue": "200003",
@@ -30,6 +34,7 @@
},
{
"BriefDescription": "Counts the number of BACLEARS due to a direct, unconditional jump.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.UNCOND",
"SampleAfterValue": "200003",
@@ -37,6 +42,7 @@
},
{
"BriefDescription": "Counts the number of times a decode restriction reduces the decode throughput due to wrong instruction length prediction.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe9",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
"SampleAfterValue": "200003",
@@ -44,6 +50,7 @@
},
{
"BriefDescription": "Counts the number of requests to the instruction cache for one or more bytes of a cache line.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
"PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
@@ -52,6 +59,7 @@
},
{
"BriefDescription": "Counts the number of instruction cache hits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"PublicDescription": "Counts the number of requests that hit in the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Counts the number of instruction cache misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
index c02eb0e836ad..34306ec24e9b 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "20003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of misaligned load uops that are 4K page splits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
"PEBS": "1",
@@ -16,6 +18,7 @@
},
{
"BriefDescription": "Counts the number of misaligned store uops that are 4K page splits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"PEBS": "1",
@@ -24,6 +27,7 @@
},
{
"BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -51,6 +57,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -69,6 +77,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -78,6 +87,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -87,6 +97,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -96,6 +107,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.FULL_STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.FULL_STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L1WB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L1WB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L2WB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L2WB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.OTHER.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -251,6 +279,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.OTHER.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -260,6 +289,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -269,6 +299,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -278,6 +309,7 @@
},
{
"BriefDescription": "Counts all hardware and software prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PREFETCHES.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -287,6 +319,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -296,6 +329,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -305,6 +339,7 @@
},
{
"BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -314,6 +349,7 @@
},
{
"BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -323,6 +359,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -332,6 +369,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
@@ -341,6 +379,7 @@
},
{
"BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -350,6 +389,7 @@
},
{
"BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/other.json b/tools/perf/pmu-events/arch/x86/snowridgex/other.json
index fefbc383b840..57613207f7ad 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/other.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.SELF_LOCKS",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EdgeDetect": "1",
"EventCode": "0x63",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock issued by other cores.",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "BUS_LOCK.BLOCK_CYCLES",
"PublicDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock issued by other cores. Counts on a per core basis.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.BLOCK_CYCLES",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "BUS_LOCK.CYCLES_OTHER_BLOCK",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.LOCK_CYCLES",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x63",
"EventName": "BUS_LOCK.CYCLES_SELF_BLOCK",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock it issued.",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "BUS_LOCK.LOCK_CYCLES",
"PublicDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock it issued. Counts on a per core basis.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts the number of bus locks a core issued its self (e.g. lock to UC or Split Lock) and does not include cache locks.",
+ "Counter": "0,1,2,3",
"EdgeDetect": "1",
"EventCode": "0x63",
"EventName": "BUS_LOCK.SELF_LOCKS",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_DRAM_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_DRAM_HIT",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_L2_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_L2_HIT",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_LLC_HIT",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_LLC_HIT",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Counts the number of core cycles during which interrupts are masked (disabled).",
+ "Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.MASKED",
"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled).",
+ "Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"PublicDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled). Increments by 1 each core cycle that both EFLAGS.IF is 0 and an INTR is pending (which means the APIC is telling the ROB to cause an INTR). This event does not increment if EFLAGS.IF is 0 but all interrupt in the APICs Interrupt Request Register (IRR) are inhibited by the PPR (thus either by ISRV or TPR) because in these cases the interrupts would be held up in the APIC and would not be pended to the ROB. This event does count when an interrupt is only inhibited by MOV/POP SS state machines or the STI state machine. These extra inhibits only last for a single instructions and would not be important.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Counts the number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.RECEIVED",
"SampleAfterValue": "203",
@@ -96,6 +108,7 @@
},
{
"BriefDescription": "Counts all code reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -114,6 +128,7 @@
},
{
"BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -123,6 +138,7 @@
},
{
"BriefDescription": "Counts all code reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -141,6 +158,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.COREWB_M.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -150,6 +168,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -159,6 +178,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -168,6 +188,7 @@
},
{
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -177,6 +198,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -186,6 +208,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -195,6 +218,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -204,6 +228,7 @@
},
{
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -213,6 +238,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
@@ -223,6 +249,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.DRAM",
@@ -233,6 +260,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
@@ -243,6 +271,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
@@ -253,6 +282,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -262,6 +292,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -271,6 +302,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -280,6 +312,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -289,6 +322,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -298,6 +332,7 @@
},
{
"BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -307,6 +342,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -316,6 +352,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -325,6 +362,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -334,6 +372,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -343,6 +382,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -352,6 +392,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -361,6 +402,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -370,6 +412,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -379,6 +422,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -388,6 +432,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -397,6 +442,7 @@
},
{
"BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -406,6 +452,7 @@
},
{
"BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L1WB_M.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -415,6 +462,7 @@
},
{
"BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.L2WB_M.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -424,6 +472,7 @@
},
{
"BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -433,6 +482,7 @@
},
{
"BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -442,6 +492,7 @@
},
{
"BriefDescription": "Counts all hardware and software prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.PREFETCHES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -451,6 +502,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -460,6 +512,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -469,6 +522,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -478,6 +532,7 @@
},
{
"BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.READS_TO_CORE.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -487,6 +542,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -496,6 +552,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -505,6 +562,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -514,6 +572,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -523,6 +582,7 @@
},
{
"BriefDescription": "Counts uncached memory reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
@@ -532,6 +592,7 @@
},
{
"BriefDescription": "Counts uncached memory writes that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.UC_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json b/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
index c483c0838e08..e4e7902c1162 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "1",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "1",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "1",
@@ -105,6 +118,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
@@ -113,6 +127,7 @@
},
{
"BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "1",
@@ -121,6 +136,7 @@
},
{
"BriefDescription": "Counts the total number of BTCLEARS.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe8",
"EventName": "BTCLEAR.ANY",
"PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
@@ -128,6 +144,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
"SampleAfterValue": "2000003",
@@ -135,6 +152,7 @@
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
@@ -142,6 +160,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
@@ -150,6 +169,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
"SampleAfterValue": "2000003",
@@ -157,6 +177,7 @@
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
@@ -165,6 +186,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.ANY",
@@ -172,6 +194,7 @@
},
{
"BriefDescription": "Counts the number of cycles the integer divider is busy.",
+ "Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.IDIV",
"PublicDescription": "Counts the number of cycles the integer divider is busy. Does not imply a stall waiting for the divider.",
@@ -180,6 +203,7 @@
},
{
"BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
@@ -188,6 +212,7 @@
},
{
"BriefDescription": "Counts the total number of instructions retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -196,6 +221,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.4K_ALIAS",
"PEBS": "1",
@@ -204,6 +230,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks).",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL",
"PEBS": "1",
@@ -212,6 +239,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "1",
@@ -220,6 +248,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "1",
@@ -228,12 +257,14 @@
},
{
"BriefDescription": "Counts the total number of machine clears for any reason including, but not limited to, memory ordering, memory disambiguation, SMC, and FP assist.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.ANY",
"SampleAfterValue": "20003"
},
{
"BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"SampleAfterValue": "20003",
@@ -241,6 +272,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
"SampleAfterValue": "20003",
@@ -248,6 +280,7 @@
},
{
"BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20003",
@@ -255,6 +288,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
@@ -263,6 +297,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
"SampleAfterValue": "1000003",
@@ -270,6 +305,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
"SampleAfterValue": "1000003",
@@ -277,6 +313,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to branch mispredicts.",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
"SampleAfterValue": "1000003",
@@ -284,6 +321,7 @@
},
{
"BriefDescription": "This event is deprecated. Refer to new event TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MONUKE",
@@ -292,12 +330,14 @@
},
{
"BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to backend stalls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions.",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
"SampleAfterValue": "1000003",
@@ -305,6 +345,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -312,6 +353,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
"SampleAfterValue": "1000003",
@@ -319,6 +361,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
"SampleAfterValue": "1000003",
@@ -326,6 +369,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
"SampleAfterValue": "1000003",
@@ -333,6 +377,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
"SampleAfterValue": "1000003",
@@ -340,6 +385,7 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.STORE_BUFFER",
@@ -348,12 +394,14 @@
},
{
"BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
@@ -362,6 +410,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
@@ -370,6 +419,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
"SampleAfterValue": "1000003",
@@ -377,6 +427,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stalls.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
"SampleAfterValue": "1000003",
@@ -384,6 +435,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
@@ -392,6 +444,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
"SampleAfterValue": "1000003",
@@ -399,6 +452,7 @@
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to wrong predecodes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
"SampleAfterValue": "1000003",
@@ -406,6 +460,7 @@
},
{
"BriefDescription": "Counts the total number of consumed retirement slots.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "TOPDOWN_RETIRING.ALL",
"PEBS": "1",
@@ -413,6 +468,7 @@
},
{
"BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
@@ -420,6 +476,7 @@
},
{
"BriefDescription": "Counts the total number of uops retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
@@ -427,6 +484,7 @@
},
{
"BriefDescription": "Counts the number of integer divide uops retired.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
"PEBS": "1",
@@ -435,6 +493,7 @@
},
{
"BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"PEBS": "1",
@@ -444,6 +503,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json
index 4090e4da1bd0..7551fb91a9d7 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_READ",
"Filter": "config1=0x40040e33",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_WRITE",
"Filter": "config1=0x40041e33",
@@ -21,6 +23,7 @@
},
{
"BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.UNCACHEABLE",
"Filter": "config1=0x40e33",
@@ -31,6 +34,7 @@
},
{
"BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_FULL",
"Filter": "config1=0x41833",
@@ -42,6 +46,7 @@
},
{
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
"Filter": "config1=0x41a33",
@@ -53,8 +58,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -62,8 +69,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -71,8 +80,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -80,8 +91,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -89,8 +102,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -98,8 +113,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -107,8 +124,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -116,8 +135,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -125,8 +146,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -134,8 +157,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -143,8 +168,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -152,8 +179,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -161,8 +190,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -170,8 +201,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -179,8 +212,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -188,8 +223,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -197,8 +234,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -206,8 +245,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -215,8 +256,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -224,8 +267,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -233,8 +278,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -242,8 +289,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -251,8 +300,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -260,8 +311,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -269,8 +322,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -278,8 +333,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -287,8 +344,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -296,8 +355,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -305,8 +366,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -314,8 +377,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -323,8 +388,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -332,8 +399,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -341,8 +410,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -350,8 +421,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -359,8 +432,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -368,8 +443,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -377,8 +454,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -386,8 +465,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -395,8 +476,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -404,8 +487,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -413,8 +498,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -422,8 +509,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -431,8 +520,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -440,8 +531,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -449,8 +542,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -458,8 +553,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -467,8 +564,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -476,8 +575,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -485,8 +586,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -494,8 +597,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -503,8 +608,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -512,8 +619,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -521,8 +630,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -530,8 +641,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -539,8 +652,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -548,8 +663,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -557,8 +674,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -566,8 +685,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -575,8 +696,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -584,8 +707,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -593,8 +718,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -602,8 +729,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -611,8 +740,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -620,8 +751,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -629,8 +762,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -638,8 +773,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -647,8 +784,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -656,8 +795,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -665,8 +806,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -674,8 +817,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -683,8 +828,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -692,8 +839,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -701,8 +850,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -710,8 +861,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -719,8 +872,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -728,8 +883,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -737,8 +894,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -746,8 +905,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -755,8 +916,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -764,8 +927,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -773,8 +938,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -782,8 +949,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -791,8 +960,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -800,8 +971,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -809,8 +982,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -818,8 +993,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -827,8 +1004,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -836,8 +1015,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -845,8 +1026,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Intermediate bypass Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the intermediate bypass.",
"UMask": "0x2",
@@ -854,8 +1037,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Not Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
"UMask": "0x4",
@@ -863,8 +1048,10 @@
},
{
"BriefDescription": "CHA to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x57",
"EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Bypass : Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the full bypass.",
"UMask": "0x1",
@@ -872,12 +1059,14 @@
},
{
"BriefDescription": "Uncore cache clock ticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_CHA_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_CHA_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -885,8 +1074,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xf2",
@@ -894,8 +1085,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Any Single Snoop : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0xf1",
@@ -903,8 +1096,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x42",
@@ -912,8 +1107,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x41",
@@ -921,8 +1118,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x82",
@@ -930,8 +1129,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x81",
@@ -939,8 +1140,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Multiple External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x22",
@@ -948,8 +1151,10 @@
},
{
"BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Core Cross Snoops Issued : Single External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
"UMask": "0x21",
@@ -957,104 +1162,130 @@
},
{
"BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counter 0 Occupancy : Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
"UMask": "0x4",
@@ -1062,8 +1293,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
"UMask": "0x8",
@@ -1071,8 +1304,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
"UMask": "0x40",
@@ -1080,8 +1315,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
"UMask": "0x80",
@@ -1089,8 +1326,10 @@
},
{
"BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x2",
@@ -1098,8 +1337,10 @@
},
{
"BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_CHA_DISTRESS_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x1",
@@ -1107,8 +1348,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -1116,8 +1359,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -1125,8 +1370,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1134,8 +1381,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1143,8 +1392,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1152,8 +1403,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1161,8 +1414,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1170,8 +1425,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1179,8 +1436,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1188,8 +1447,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1197,8 +1458,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1206,8 +1469,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1215,8 +1480,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1224,8 +1491,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1233,8 +1502,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1242,8 +1513,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1251,8 +1524,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1260,8 +1535,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1269,8 +1546,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -1278,8 +1557,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -1287,6 +1568,7 @@
},
{
"BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
"PerPkg": "1",
@@ -1296,8 +1578,10 @@
},
{
"BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x59",
"EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "HA to iMC Reads Issued : ISOCH : Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
"UMask": "0x2",
@@ -1305,6 +1589,7 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
"PerPkg": "1",
@@ -1314,8 +1599,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x4",
@@ -1323,8 +1610,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x2",
@@ -1332,8 +1621,10 @@
},
{
"BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "Counter": "0,1,2,3",
"EventCode": "0x5B",
"EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
"UMask": "0x8",
@@ -1341,8 +1632,10 @@
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x1fffff",
@@ -1350,25 +1643,31 @@
},
{
"BriefDescription": "Cache Lookups : All Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local or remote transaction to the LLC, including prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1bd0ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Code Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Code Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd0ff",
@@ -1376,16 +1675,20 @@
},
{
"BriefDescription": "Cache Lookups : CRd Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : CRd Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Code Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Code Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd001",
@@ -1393,23 +1696,28 @@
},
{
"BriefDescription": "Cache Lookups : Local request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Local request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local transaction to the LLC, including prefetches from the Core",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1bc1ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
@@ -1419,25 +1727,31 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1fc1ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Data Read Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Read transactions.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Data Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Data Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bc101",
@@ -1445,17 +1759,21 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.DMND_READ_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x841ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Exclusive State",
"UMask": "0x20",
@@ -1463,8 +1781,10 @@
},
{
"BriefDescription": "Cache Lookups : F State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : F State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Forward State",
"UMask": "0x80",
@@ -1472,8 +1792,10 @@
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
"UMask": "0x1a44ff",
@@ -1481,16 +1803,20 @@
},
{
"BriefDescription": "Cache Lookups : Flush or Invalidate Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Flush or Invalidate Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : I State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : I State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Miss",
"UMask": "0x1",
@@ -1498,16 +1824,20 @@
},
{
"BriefDescription": "Cache Lookups : Transactions homed locally Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Transactions homed locally Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : M State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Modified State",
"UMask": "0x40",
@@ -1515,8 +1845,10 @@
},
{
"BriefDescription": "Cache Lookups : All Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : All Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1fe001",
@@ -1524,16 +1856,20 @@
},
{
"BriefDescription": "Cache Lookups : Write Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Write Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Writeback transactions to the LLC This includes all write transactions -- both Cacheable and UC.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : Reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd9ff",
@@ -1541,8 +1877,10 @@
},
{
"BriefDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_LOC_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x9d9ff",
@@ -1550,8 +1888,10 @@
},
{
"BriefDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_REM_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x11d9ff",
@@ -1559,8 +1899,10 @@
},
{
"BriefDescription": "Cache Lookups : Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd901",
@@ -1568,8 +1910,10 @@
},
{
"BriefDescription": "Cache Lookups : Locally HOMed Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_LOC_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Locally HOMed Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0xbd901",
@@ -1577,8 +1921,10 @@
},
{
"BriefDescription": "Cache Lookups : Remotely HOMed Read Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_REM_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remotely HOMed Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x13d901",
@@ -1586,8 +1932,10 @@
},
{
"BriefDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_OR_SNOOP_REMOTE_MISS_REM_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x161901",
@@ -1595,8 +1943,10 @@
},
{
"BriefDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_REMOTE_LOC_HOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0xa19ff",
@@ -1604,8 +1954,10 @@
},
{
"BriefDescription": "Cache Lookups : Reads that Hit the Snoop Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.READ_SF_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Reads that Hit the Snoop Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bd90e",
@@ -1613,8 +1965,10 @@
},
{
"BriefDescription": "Cache Lookups : RFO Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"UMask": "0x1bc8ff",
@@ -1622,16 +1976,20 @@
},
{
"BriefDescription": "Cache Lookups : RFO Request Filter",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : RFO Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : RFO Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
"UMask": "0x1bc801",
@@ -1639,17 +1997,21 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.RFO_PREF_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x888ff",
"Unit": "CHA"
},
{
"BriefDescription": "Cache Lookups : S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Shared State",
"UMask": "0x10",
@@ -1657,8 +2019,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit Exclusive State",
"UMask": "0x4",
@@ -1666,8 +2030,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - H State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit HitMe State",
"UMask": "0x8",
@@ -1675,8 +2041,10 @@
},
{
"BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : SnoopFilter - S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit Shared State",
"UMask": "0x2",
@@ -1684,8 +2052,10 @@
},
{
"BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
"UMask": "0x1a42ff",
@@ -1693,15 +2063,18 @@
},
{
"BriefDescription": "This event is deprecated.",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x34",
"EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x842ff",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized : All Lines Victimized",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.ALL",
"PerPkg": "1",
@@ -1711,8 +2084,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in E state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2",
@@ -1720,8 +2095,10 @@
},
{
"BriefDescription": "Lines Victimized : Local - All Lines",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - All Lines : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x200f",
@@ -1729,8 +2106,10 @@
},
{
"BriefDescription": "Lines Victimized : Local - Lines in E State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in E State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2002",
@@ -1738,8 +2117,10 @@
},
{
"BriefDescription": "Lines Victimized : Local - Lines in M State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in M State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2001",
@@ -1747,16 +2128,20 @@
},
{
"BriefDescription": "Lines Victimized : Local Only",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"Unit": "CHA"
},
{
"BriefDescription": "Lines Victimized : Local - Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Local - Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x2004",
@@ -1764,8 +2149,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in M state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
@@ -1773,8 +2160,10 @@
},
{
"BriefDescription": "Lines Victimized : Lines in S State",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Lines Victimized : Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x4",
@@ -1782,8 +2171,10 @@
},
{
"BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : CV0 Prefetch Miss : Miscellaneous events in the Cbo.",
"UMask": "0x20",
@@ -1791,8 +2182,10 @@
},
{
"BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : CV0 Prefetch Victim : Miscellaneous events in the Cbo.",
"UMask": "0x10",
@@ -1800,8 +2193,10 @@
},
{
"BriefDescription": "Number of times that an RFO hit in S state.",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
"UMask": "0x8",
@@ -1809,8 +2204,10 @@
},
{
"BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : Silent Snoop Eviction : Miscellaneous events in the Cbo. : Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
"UMask": "0x1",
@@ -1818,8 +2215,10 @@
},
{
"BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cbo Misc : Write Combining Aliasing : Miscellaneous events in the Cbo. : Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
"UMask": "0x2",
@@ -1827,64 +2226,80 @@
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ADEGRCREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.AKEGRCREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ALLRSFWAYS_RES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.BLEGRCREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.FSF_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLOWSNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x4",
@@ -1892,8 +2307,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLWAYRSV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x10",
@@ -1901,8 +2318,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_PAMATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x2",
@@ -1910,8 +2329,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_WAYMATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x8",
@@ -1919,88 +2340,110 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.HACREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IDX_INPIPE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IPQ_SETMATCH_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IRQ_SETMATCH_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ISMQ_SETMATCH_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.IVEGRCREDIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.LLC_WAYS_RES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.NOTALLOWSNOOP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ONE_FSF_VIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.ONE_RSP_CON",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.PTL_INPIPE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x80",
@@ -2008,8 +2451,10 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.RMW_SETMATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"UMask": "0x1",
@@ -2017,104 +2462,100 @@
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.RRQ_SETMATCH_VICP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.SETMATCHENTRYWSCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.SF_WAYS_RES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.TOPA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.TORID_MATCH_GO_P",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.VN_AD_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.VN_AD_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
- "EventCode": "0x42",
- "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCB",
- "PerPkg": "1",
- "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Pipe Rejects",
- "EventCode": "0x42",
- "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCS",
- "PerPkg": "1",
- "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.VN_BL_RSP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "Pipe Rejects",
- "EventCode": "0x42",
- "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_WB",
- "PerPkg": "1",
- "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_CHA_PIPE_REJECT.WAY_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC0 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -2122,8 +2563,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC1 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -2131,40 +2574,50 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC10",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC10 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 10 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC11",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC11",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC11 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 11 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC12",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC12",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC12 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 12 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC13",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC13",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC13 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 13 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC2 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -2172,8 +2625,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC3 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -2181,8 +2636,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC4 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -2190,8 +2647,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC5 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -2199,8 +2658,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC6",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC6 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 6 only.",
"UMask": "0x40",
@@ -2208,8 +2669,10 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC7",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC7 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 7 only.",
"UMask": "0x80",
@@ -2217,24 +2680,30 @@
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC8",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC8 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 8 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx READ Credits Empty : MC9",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_CHA_READ_NO_CREDITS.MC9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx READ Credits Empty : MC9 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 9 only.",
"Unit": "CHA"
},
{
"BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and remote INVITOE requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
"UMask": "0x30",
@@ -2242,6 +2711,7 @@
},
{
"BriefDescription": "Local read requests that miss the SF/LLC and remote read requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS",
"PerPkg": "1",
@@ -2251,6 +2721,7 @@
},
{
"BriefDescription": "Local write requests that miss the SF/LLC and remote write requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.WRITES",
"PerPkg": "1",
@@ -2260,8 +2731,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -2269,8 +2742,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -2278,8 +2753,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -2287,8 +2764,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -2296,8 +2775,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -2305,8 +2786,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -2314,8 +2797,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x10",
@@ -2323,8 +2808,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -2332,8 +2819,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -2341,95 +2830,119 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "UNC_CHA_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x1",
@@ -2437,8 +2950,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : IRQ Rejected : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x2",
@@ -2446,8 +2961,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x10",
@@ -2455,8 +2972,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x20",
@@ -2464,8 +2983,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2473,8 +2994,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2482,8 +3005,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -2491,8 +3016,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2500,8 +3027,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2509,8 +3038,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2518,8 +3049,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2527,8 +3060,10 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -2536,16 +3071,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IRQ0 Reject counter was true",
"UMask": "0x1",
@@ -2553,16 +3092,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2570,24 +3113,30 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "CHA"
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2595,16 +3144,20 @@
},
{
"BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2612,8 +3165,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2621,8 +3176,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
"UMask": "0x40",
@@ -2630,8 +3187,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2639,8 +3198,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2648,8 +3209,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2657,8 +3220,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2666,8 +3231,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
"UMask": "0x80",
@@ -2675,8 +3242,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2684,8 +3253,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2693,8 +3264,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
"UMask": "0x40",
@@ -2702,8 +3275,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2711,8 +3286,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2720,8 +3297,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2729,8 +3308,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2738,8 +3319,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
"UMask": "0x80",
@@ -2747,8 +3330,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
"UMask": "0x1",
@@ -2756,8 +3341,10 @@
},
{
"BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Rejects - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2765,8 +3352,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
"UMask": "0x1",
@@ -2774,8 +3363,10 @@
},
{
"BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "ISMQ Retries - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
"UMask": "0x2",
@@ -2783,8 +3374,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Occupancy : IRQ",
+ "Counter": "0",
"EventCode": "0x11",
"EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Occupancy : IRQ : Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x1",
@@ -2792,8 +3385,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : AD REQ on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2801,8 +3396,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : AD RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2810,8 +3407,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : Non UPI AK Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject AK ring message",
"UMask": "0x40",
@@ -2819,8 +3418,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL NCB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2828,8 +3429,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL NCS on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2837,8 +3440,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2846,8 +3451,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : BL WB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2855,8 +3462,10 @@
},
{
"BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 0 : Non UPI IV Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject IV ring message",
"UMask": "0x80",
@@ -2864,8 +3473,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : Allow Snoop : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x40",
@@ -2873,8 +3484,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : ANY0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Any condition listed in the Other0 Reject counter was true",
"UMask": "0x1",
@@ -2882,8 +3495,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : HA : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x2",
@@ -2891,8 +3506,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : LLC OR SF Way : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -2900,8 +3517,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : LLC Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x4",
@@ -2909,8 +3528,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : PhyAddr Match : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -2918,8 +3539,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : SF Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -2927,8 +3550,10 @@
},
{
"BriefDescription": "Other Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Other Retries - Set 1 : Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
"UMask": "0x10",
@@ -2936,8 +3561,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -2945,8 +3572,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -2954,8 +3583,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
"UMask": "0x40",
@@ -2963,8 +3594,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -2972,8 +3605,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -2981,8 +3616,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -2990,8 +3627,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -2999,8 +3638,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
"UMask": "0x80",
@@ -3008,16 +3649,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the PRQ0 Reject counter was true",
"UMask": "0x1",
@@ -3025,16 +3670,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -3042,16 +3691,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -3059,8 +3712,10 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -3068,16 +3723,20 @@
},
{
"BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : AD REQ on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a request",
"UMask": "0x1",
@@ -3085,8 +3744,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : AD RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a response",
"UMask": "0x2",
@@ -3094,8 +3755,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : Non UPI AK Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject AK ring message",
"UMask": "0x40",
@@ -3103,8 +3766,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL NCB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCB",
"UMask": "0x10",
@@ -3112,8 +3777,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL NCS on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCS",
"UMask": "0x20",
@@ -3121,8 +3788,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a response",
"UMask": "0x4",
@@ -3130,8 +3799,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : BL WB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a writeback",
"UMask": "0x8",
@@ -3139,8 +3810,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 0 : Non UPI IV Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject IV ring message",
"UMask": "0x80",
@@ -3148,8 +3821,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : Allow Snoop : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x40",
@@ -3157,8 +3832,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : ANY0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Any condition listed in the WBQ0 Reject counter was true",
"UMask": "0x1",
@@ -3166,8 +3843,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : HA : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x2",
@@ -3175,8 +3854,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : LLC OR SF Way : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Way conflict with another request that caused the reject",
"UMask": "0x20",
@@ -3184,8 +3865,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : LLC Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x4",
@@ -3193,8 +3876,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : PhyAddr Match : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Address match with an outstanding request that was rejected.",
"UMask": "0x80",
@@ -3202,8 +3887,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : SF Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Requests did not generate Snoop filter victim",
"UMask": "0x8",
@@ -3211,8 +3898,10 @@
},
{
"BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Request Queue Retries - Set 1 : Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
"UMask": "0x10",
@@ -3220,8 +3909,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3229,8 +3920,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -3238,8 +3931,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -3247,8 +3942,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3256,8 +3953,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -3265,8 +3964,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -3274,8 +3975,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3283,8 +3986,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -3292,8 +3997,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -3301,8 +4008,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -3310,8 +4019,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x80",
@@ -3319,8 +4030,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3328,8 +4041,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -3337,8 +4052,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -3346,8 +4063,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_CHA_RxR_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -3355,8 +4074,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3364,8 +4085,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -3373,8 +4096,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -3382,8 +4107,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -3391,8 +4118,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3400,8 +4129,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -3409,8 +4140,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -3418,8 +4151,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -3427,8 +4162,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_CHA_RxR_CRD_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -3436,16 +4173,20 @@
},
{
"BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_CHA_RxR_CRD_STARVED_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"Unit": "CHA"
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3453,8 +4194,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3462,8 +4205,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3471,8 +4216,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3480,8 +4227,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -3489,8 +4238,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3498,8 +4249,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -3507,8 +4260,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3516,8 +4271,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_CHA_RxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3525,8 +4282,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3534,8 +4293,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3543,8 +4304,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3552,8 +4315,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3561,8 +4326,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -3570,8 +4337,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3579,8 +4348,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x20",
@@ -3588,8 +4359,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3597,8 +4370,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_CHA_RxR_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3606,6 +4381,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.E_STATE",
"PerPkg": "1",
@@ -3615,6 +4391,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.M_STATE",
"PerPkg": "1",
@@ -3624,6 +4401,7 @@
},
{
"BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0x3D",
"EventName": "UNC_CHA_SF_EVICTION.S_STATE",
"PerPkg": "1",
@@ -3633,8 +4411,10 @@
},
{
"BriefDescription": "Snoops Sent : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : All : Counts the number of snoops issued by the HA.",
"UMask": "0x1",
@@ -3642,8 +4422,10 @@
},
{
"BriefDescription": "Snoops Sent : Broadcast snoops for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Broadcast snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA responding to local requests",
"UMask": "0x10",
@@ -3651,8 +4433,10 @@
},
{
"BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Directed snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA responding to local requests",
"UMask": "0x40",
@@ -3660,8 +4444,10 @@
},
{
"BriefDescription": "Snoops Sent : Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoops Sent : Snoops sent for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA responding to local requests",
"UMask": "0x4",
@@ -3669,8 +4455,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspCnflct : Number of snoop responses received for a Local request : Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
@@ -3678,8 +4466,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
"UMask": "0x80",
@@ -3687,8 +4477,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : Rsp*FWD*WB : Number of snoop responses received for a Local request : Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"UMask": "0x20",
@@ -3696,8 +4488,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspI",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspI : Number of snoop responses received for a Local request : Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
"UMask": "0x1",
@@ -3705,8 +4499,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspIFwd : Number of snoop responses received for a Local request : Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
@@ -3714,8 +4510,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspS",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspS : Number of snoop responses received for a Local request : Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
@@ -3723,8 +4521,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : RspSFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its currently copy. This is common for data and code reads that hit in a remote socket in E or F state.",
"UMask": "0x8",
@@ -3732,8 +4532,10 @@
},
{
"BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Snoop Responses Received Local : Rsp*WB : Number of snoop responses received for a Local request : Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
"UMask": "0x10",
@@ -3741,56 +4543,70 @@
},
{
"BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "CHA"
},
{
"BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "CHA"
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3798,8 +4614,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3807,8 +4625,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3816,8 +4636,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3825,8 +4647,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3834,8 +4658,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3843,8 +4669,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -3852,8 +4680,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -3861,8 +4691,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3870,8 +4702,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3879,8 +4713,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3888,8 +4724,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3897,8 +4735,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3906,8 +4746,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3915,8 +4757,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -3924,8 +4768,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -3933,8 +4779,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3942,8 +4790,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3951,8 +4801,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3960,8 +4812,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3969,8 +4823,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3978,8 +4834,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3987,8 +4845,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -3996,8 +4856,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -4005,8 +4867,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4014,8 +4878,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4023,8 +4889,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4032,8 +4900,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -4041,8 +4911,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -4050,8 +4922,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -4059,8 +4933,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -4068,8 +4944,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -4077,8 +4955,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4086,8 +4966,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4095,8 +4977,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4104,8 +4988,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4113,8 +4999,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4122,8 +5010,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4131,8 +5021,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4140,8 +5032,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4149,8 +5043,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4158,8 +5054,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -4167,8 +5065,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -4176,8 +5076,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -4185,8 +5087,10 @@
},
{
"BriefDescription": "TOR Inserts : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc001ffff",
@@ -4194,24 +5098,30 @@
},
{
"BriefDescription": "TOR Inserts : DDR4 Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DDR4 Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.DDR",
+ "Counter": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.DDR4",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : SF/LLC Evictions : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -4219,14 +5129,17 @@
},
{
"BriefDescription": "TOR Inserts : Just Hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Hits : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All requests from iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA",
"PerPkg": "1",
@@ -4236,6 +5149,7 @@
},
{
"BriefDescription": "TOR Inserts : CLFlushes issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
"PerPkg": "1",
@@ -4245,8 +5159,10 @@
},
{
"BriefDescription": "TOR Inserts : CLFlushOpts issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CLFlushOpts issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8d7ff01",
@@ -4254,6 +5170,7 @@
},
{
"BriefDescription": "TOR Inserts : CRDs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
"PerPkg": "1",
@@ -4263,8 +5180,10 @@
},
{
"BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88fff01",
@@ -4272,8 +5191,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837ff01",
@@ -4281,6 +5202,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
"PerPkg": "1",
@@ -4290,6 +5212,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
"PerPkg": "1",
@@ -4299,6 +5222,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
"PerPkg": "1",
@@ -4308,6 +5232,7 @@
},
{
"BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
"PerPkg": "1",
@@ -4317,6 +5242,7 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
"PerPkg": "1",
@@ -4326,8 +5252,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to page walks that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fd01",
@@ -4335,6 +5263,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
"PerPkg": "1",
@@ -4344,6 +5273,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
"PerPkg": "1",
@@ -4353,6 +5283,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
"PerPkg": "1",
@@ -4362,6 +5293,7 @@
},
{
"BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
"PerPkg": "1",
@@ -4371,6 +5303,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
"PerPkg": "1",
@@ -4380,6 +5313,7 @@
},
{
"BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
"PerPkg": "1",
@@ -4389,6 +5323,7 @@
},
{
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
"PerPkg": "1",
@@ -4398,8 +5333,10 @@
},
{
"BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fe01",
@@ -4407,6 +5344,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
"PerPkg": "1",
@@ -4416,6 +5354,7 @@
},
{
"BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
"PerPkg": "1",
@@ -4425,6 +5364,7 @@
},
{
"BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR",
"PerPkg": "1",
@@ -4434,6 +5374,7 @@
},
{
"BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR",
"PerPkg": "1",
@@ -4443,6 +5384,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
"PerPkg": "1",
@@ -4452,6 +5394,7 @@
},
{
"BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
"PerPkg": "1",
@@ -4461,6 +5404,7 @@
},
{
"BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
"PerPkg": "1",
@@ -4470,6 +5414,7 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
"PerPkg": "1",
@@ -4479,6 +5424,7 @@
},
{
"BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
"PerPkg": "1",
@@ -4488,6 +5434,7 @@
},
{
"BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
"PerPkg": "1",
@@ -4497,6 +5444,7 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
"PerPkg": "1",
@@ -4506,6 +5454,7 @@
},
{
"BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
"PerPkg": "1",
@@ -4515,8 +5464,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc3fff01",
@@ -4524,8 +5475,10 @@
},
{
"BriefDescription": "TOR Inserts : WBEFtoIs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc37ff01",
@@ -4533,8 +5486,10 @@
},
{
"BriefDescription": "TOR Inserts : WBMtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbMtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc2fff01",
@@ -4542,8 +5497,10 @@
},
{
"BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbMtoIs issued by iA Cores . (Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc27ff01",
@@ -4551,8 +5508,10 @@
},
{
"BriefDescription": "TOR Inserts : WBStoIs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbStoIs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc67ff01",
@@ -4560,8 +5519,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86fff01",
@@ -4569,8 +5530,10 @@
},
{
"BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WCiLF issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867ff01",
@@ -4578,6 +5541,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO",
"PerPkg": "1",
@@ -4587,8 +5551,10 @@
},
{
"BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CLFlushes issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8c3ff04",
@@ -4596,6 +5562,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
"PerPkg": "1",
@@ -4605,6 +5572,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
"PerPkg": "1",
@@ -4614,6 +5582,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4623,6 +5592,7 @@
},
{
"BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
"PerPkg": "1",
@@ -4632,8 +5602,10 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fd04",
@@ -4641,6 +5613,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
"PerPkg": "1",
@@ -4650,6 +5623,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4659,6 +5633,7 @@
},
{
"BriefDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
"PerPkg": "1",
@@ -4668,6 +5643,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
"PerPkg": "1",
@@ -4677,6 +5653,7 @@
},
{
"BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
"PerPkg": "1",
@@ -4686,6 +5663,7 @@
},
{
"BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -4695,8 +5673,10 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fe04",
@@ -4704,6 +5684,7 @@
},
{
"BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
"PerPkg": "1",
@@ -4713,8 +5694,10 @@
},
{
"BriefDescription": "TOR Inserts : RFOs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803ff04",
@@ -4722,8 +5705,10 @@
},
{
"BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : WbMtoIs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc23ff04",
@@ -4731,8 +5716,10 @@
},
{
"BriefDescription": "TOR Inserts : IRQ - iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IRQ - iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From an iA Core",
"UMask": "0x1",
@@ -4740,8 +5727,10 @@
},
{
"BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : IRQ - Non iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x10",
@@ -4749,24 +5738,30 @@
},
{
"BriefDescription": "TOR Inserts : Just ISOC",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just ISOC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just Local Targets",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Local Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA and IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests",
"UMask": "0xc000ff05",
@@ -4774,8 +5769,10 @@
},
{
"BriefDescription": "TOR Inserts : All from Local iA",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests from iA Cores",
"UMask": "0xc000ff01",
@@ -4783,8 +5780,10 @@
},
{
"BriefDescription": "TOR Inserts : All from Local IO",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : All from Local IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally generated IO traffic",
"UMask": "0xc000ff04",
@@ -4792,64 +5791,80 @@
},
{
"BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just Misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just Misses : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : MMCFG Access",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : MMCFG Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NearMem",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NonCoherent",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NonCoherent : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Just NotNearMem",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Just NotNearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PRQ - IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From a PCIe Device",
"UMask": "0x4",
@@ -4857,8 +5872,10 @@
},
{
"BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : PRQ - Non IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x20",
@@ -4866,16 +5883,20 @@
},
{
"BriefDescription": "TOR Occupancy : DDR4 Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DDR4 Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : SF/LLC Evictions : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
"UMask": "0x2",
@@ -4883,14 +5904,17 @@
},
{
"BriefDescription": "TOR Occupancy : Just Hits",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Hits : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All requests from iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
"PerPkg": "1",
@@ -4900,8 +5924,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8c7ff01",
@@ -4909,8 +5935,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8d7ff01",
@@ -4918,6 +5946,7 @@
},
{
"BriefDescription": "TOR Occupancy : CRDs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
"PerPkg": "1",
@@ -4927,8 +5956,10 @@
},
{
"BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
"UMask": "0xc88fff01",
@@ -4936,8 +5967,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837ff01",
@@ -4945,6 +5978,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
"PerPkg": "1",
@@ -4954,6 +5988,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
"PerPkg": "1",
@@ -4963,6 +5998,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
"PerPkg": "1",
@@ -4972,8 +6008,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc80ffd01",
@@ -4981,8 +6019,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88ffd01",
@@ -4990,8 +6030,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fd01",
@@ -4999,6 +6041,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
"PerPkg": "1",
@@ -5008,6 +6051,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
"PerPkg": "1",
@@ -5017,8 +6061,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc807fd01",
@@ -5026,8 +6072,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc887fd01",
@@ -5035,6 +6083,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
"PerPkg": "1",
@@ -5044,6 +6093,7 @@
},
{
"BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
"PerPkg": "1",
@@ -5053,8 +6103,10 @@
},
{
"BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc88ffe01",
@@ -5062,8 +6114,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc837fe01",
@@ -5071,6 +6125,7 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
"PerPkg": "1",
@@ -5080,8 +6135,10 @@
},
{
"BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8a7fe01",
@@ -5089,8 +6146,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc867fe01",
@@ -5098,8 +6157,10 @@
},
{
"BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
"UMask": "0xc86ffe01",
@@ -5107,6 +6168,7 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
"PerPkg": "1",
@@ -5116,8 +6178,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc887fe01",
@@ -5125,8 +6189,10 @@
},
{
"BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc877de01",
@@ -5134,8 +6200,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86ffe01",
@@ -5143,8 +6211,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867fe01",
@@ -5152,8 +6222,10 @@
},
{
"BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc87fde01",
@@ -5161,6 +6233,7 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
"PerPkg": "1",
@@ -5170,8 +6243,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc887ff01",
@@ -5179,8 +6254,10 @@
},
{
"BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc27ff01",
@@ -5188,8 +6265,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc86fff01",
@@ -5197,8 +6276,10 @@
},
{
"BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc867ff01",
@@ -5206,6 +6287,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
"PerPkg": "1",
@@ -5215,8 +6297,10 @@
},
{
"BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8c3ff04",
@@ -5224,6 +6308,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
"PerPkg": "1",
@@ -5233,8 +6318,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc43fd04",
@@ -5242,8 +6329,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd43fd04",
@@ -5251,8 +6340,10 @@
},
{
"BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc8f3fd04",
@@ -5260,8 +6351,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fd04",
@@ -5269,8 +6362,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc43ff04",
@@ -5278,8 +6373,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd43ff04",
@@ -5287,6 +6384,7 @@
},
{
"BriefDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
"PerPkg": "1",
@@ -5296,8 +6394,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc43fe04",
@@ -5305,8 +6405,10 @@
},
{
"BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcd43fe04",
@@ -5314,6 +6416,7 @@
},
{
"BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
"PerPkg": "1",
@@ -5323,8 +6426,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fe04",
@@ -5332,6 +6437,7 @@
},
{
"BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
"PerPkg": "1",
@@ -5341,8 +6447,10 @@
},
{
"BriefDescription": "TOR Occupancy : RFOs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803ff04",
@@ -5350,8 +6458,10 @@
},
{
"BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xcc23ff04",
@@ -5359,8 +6469,10 @@
},
{
"BriefDescription": "TOR Occupancy : IRQ - iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IRQ - iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From an iA Core",
"UMask": "0x1",
@@ -5368,8 +6480,10 @@
},
{
"BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : IRQ - Non iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x10",
@@ -5377,24 +6491,30 @@
},
{
"BriefDescription": "TOR Occupancy : Just ISOC",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just ISOC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just Local Targets",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Local Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA and IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests",
"UMask": "0xc000ff05",
@@ -5402,8 +6522,10 @@
},
{
"BriefDescription": "TOR Occupancy : All from Local iA",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests from iA Cores",
"UMask": "0xc000ff01",
@@ -5411,8 +6533,10 @@
},
{
"BriefDescription": "TOR Occupancy : All from Local IO",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : All from Local IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally generated IO traffic",
"UMask": "0xc000ff04",
@@ -5420,64 +6544,80 @@
},
{
"BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just Misses",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just Misses : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : MMCFG Access",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : MMCFG Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NearMem",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NonCoherent : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Just NotNearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"Unit": "CHA"
},
{
"BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PRQ - IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From a PCIe Device",
"UMask": "0x4",
@@ -5485,8 +6625,10 @@
},
{
"BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : PRQ - Non IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x20",
@@ -5494,8 +6636,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5503,8 +6647,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -5512,8 +6658,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -5521,8 +6669,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5530,8 +6680,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -5539,8 +6691,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -5548,8 +6702,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5557,8 +6713,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -5566,8 +6724,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -5575,8 +6735,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -5584,8 +6746,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x80",
@@ -5593,8 +6757,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5602,8 +6768,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -5611,8 +6779,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -5620,8 +6790,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -5629,8 +6801,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5638,8 +6812,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -5647,8 +6823,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -5656,8 +6834,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -5665,8 +6845,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -5674,8 +6856,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5683,8 +6867,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -5692,8 +6878,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -5701,8 +6889,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -5710,8 +6900,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5719,8 +6911,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -5728,8 +6922,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -5737,8 +6933,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -5746,8 +6944,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -5755,8 +6955,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5764,8 +6966,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -5773,8 +6977,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -5782,8 +6988,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -5791,8 +6999,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5800,8 +7010,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -5809,8 +7021,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -5818,8 +7032,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -5827,8 +7043,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -5836,8 +7054,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5845,8 +7065,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -5854,8 +7076,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -5863,8 +7087,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -5872,8 +7098,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5881,8 +7109,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x10",
@@ -5890,8 +7120,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -5899,8 +7131,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -5908,8 +7142,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x80",
@@ -5917,8 +7153,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x44",
@@ -5926,8 +7164,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -5935,8 +7175,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -5944,8 +7186,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_CHA_TxR_HORZ_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -5953,8 +7197,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -5962,8 +7208,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -5971,8 +7219,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -5980,8 +7230,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -5989,8 +7241,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -5998,8 +7252,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -6007,8 +7263,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -6016,8 +7274,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -6025,8 +7285,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -6034,8 +7296,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x1",
@@ -6043,8 +7307,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -6052,8 +7318,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -6061,8 +7329,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x80",
@@ -6070,8 +7340,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x4",
@@ -6079,8 +7351,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -6088,8 +7362,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_CHA_TxR_HORZ_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -6097,8 +7373,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -6106,8 +7384,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -6115,8 +7395,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -6124,8 +7406,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -6133,8 +7417,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -6142,8 +7428,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -6151,8 +7439,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -6160,8 +7450,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -6169,8 +7461,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -6178,8 +7472,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -6187,8 +7483,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_CHA_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -6196,8 +7494,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -6205,8 +7505,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -6214,8 +7516,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6223,8 +7527,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -6232,8 +7538,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6241,8 +7549,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -6250,8 +7560,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -6259,8 +7571,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -6268,8 +7582,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -6277,8 +7593,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6286,8 +7604,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6295,8 +7615,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6304,8 +7626,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -6313,8 +7637,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6322,8 +7648,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -6331,8 +7659,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -6340,8 +7670,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -6349,8 +7681,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -6358,8 +7692,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6367,8 +7703,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6376,8 +7714,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6385,8 +7725,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -6394,8 +7736,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6403,8 +7747,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -6412,8 +7758,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -6421,8 +7769,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -6430,8 +7780,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_CHA_TxR_VERT_INSERTS0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -6439,8 +7791,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6448,8 +7802,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6457,8 +7813,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -6466,8 +7824,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -6475,8 +7835,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -6484,8 +7846,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -6493,8 +7857,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -6502,8 +7868,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -6511,8 +7879,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_CHA_TxR_VERT_NACK0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -6520,8 +7890,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -6529,8 +7901,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -6538,8 +7912,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6547,8 +7923,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -6556,8 +7934,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6565,8 +7945,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -6574,8 +7956,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -6583,8 +7967,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -6592,8 +7978,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -6601,8 +7989,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -6610,8 +8000,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -6619,8 +8011,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -6628,8 +8022,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -6637,8 +8033,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -6646,8 +8044,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -6655,8 +8055,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -6664,8 +8066,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -6673,8 +8077,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_CHA_TxR_VERT_STARVED0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -6682,8 +8088,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -6691,8 +8099,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -6700,8 +8110,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_CHA_TxR_VERT_STARVED1.TGC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -6709,8 +8121,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6718,8 +8132,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6727,8 +8143,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6736,8 +8154,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6745,8 +8165,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6754,8 +8176,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6763,8 +8187,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6772,8 +8198,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6781,8 +8209,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6790,8 +8220,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6799,8 +8231,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6808,8 +8242,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6817,8 +8253,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6826,8 +8264,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6835,8 +8275,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6844,8 +8286,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6853,8 +8297,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -6862,8 +8308,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -6871,8 +8319,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -6880,8 +8330,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -6889,8 +8341,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -6898,8 +8352,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -6907,8 +8363,10 @@
},
{
"BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbPushMtoI : Pushed to LLC : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was able to push WbPushMToI to LLC",
"UMask": "0x1",
@@ -6916,8 +8374,10 @@
},
{
"BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "WbPushMtoI : Pushed to Memory : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
"UMask": "0x2",
@@ -6925,8 +8385,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC0 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 0 only.",
"UMask": "0x1",
@@ -6934,8 +8396,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC1 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 1 only.",
"UMask": "0x2",
@@ -6943,40 +8407,50 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC10",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC10 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 10 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC11",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC11",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC11 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 11 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC12",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC12",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC12 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 12 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC13",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC13",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC13 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 13 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC2 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 2 only.",
"UMask": "0x4",
@@ -6984,8 +8458,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC3 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 3 only.",
"UMask": "0x8",
@@ -6993,8 +8469,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC4 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 4 only.",
"UMask": "0x10",
@@ -7002,8 +8480,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC5 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 5 only.",
"UMask": "0x20",
@@ -7011,8 +8491,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC6",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC6 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 6 only.",
"UMask": "0x40",
@@ -7020,8 +8502,10 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC7",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC7 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 7 only.",
"UMask": "0x80",
@@ -7029,24 +8513,30 @@
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC8",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC8 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 8 only.",
"Unit": "CHA"
},
{
"BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC9",
+ "Counter": "0,1,2,3",
"EventCode": "0x5A",
"EventName": "UNC_CHA_WRITE_NO_CREDITS.MC9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC9 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 9 only.",
"Unit": "CHA"
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 0?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
"UMask": "0x8",
@@ -7054,8 +8544,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 0?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
"UMask": "0x4",
@@ -7063,8 +8555,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 1?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
"UMask": "0x80",
@@ -7072,8 +8566,10 @@
},
{
"BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Dropped (on 1?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
"UMask": "0x40",
@@ -7081,8 +8577,10 @@
},
{
"BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Sent (on 0?) : Number of XPT prefetches sent",
"UMask": "0x1",
@@ -7090,8 +8588,10 @@
},
{
"BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "XPT Prefetches : Sent (on 1?) : Number of XPT prefetches sent",
"UMask": "0x10",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json
index 7cc3635b118b..88ee90b8a2d9 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json
@@ -1,8 +1,10 @@
[
{
"BriefDescription": "Total Write Cache Occupancy : Any Source",
+ "Counter": "0,1",
"EventCode": "0x0F",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total Write Cache Occupancy : Any Source : Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events. : Tracks all requests from any source port.",
"UMask": "0x1",
@@ -10,8 +12,10 @@
},
{
"BriefDescription": "Total Write Cache Occupancy : Snoops",
+ "Counter": "0,1",
"EventCode": "0x0F",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total Write Cache Occupancy : Snoops : Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
"UMask": "0x2",
@@ -19,6 +23,7 @@
},
{
"BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
+ "Counter": "0,1",
"EventCode": "0x0f",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
"PerPkg": "1",
@@ -28,6 +33,7 @@
},
{
"BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "Counter": "0,1",
"EventCode": "0x01",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
@@ -35,8 +41,10 @@
},
{
"BriefDescription": "Coherent Ops : CLFlush",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x80",
@@ -44,6 +52,7 @@
},
{
"BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.PCITOM",
"PerPkg": "1",
@@ -53,8 +62,10 @@
},
{
"BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.RFO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline to coherent memory. RFO is a Read For Ownership command that requests ownership of the cacheline and moves data from the mesh to IRP cache.",
"UMask": "0x8",
@@ -62,6 +73,7 @@
},
{
"BriefDescription": "Coherent Ops : WbMtoI",
+ "Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
"PerPkg": "1",
@@ -71,6 +83,7 @@
},
{
"BriefDescription": "FAF RF full",
+ "Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_FAF_FULL",
"PerPkg": "1",
@@ -78,6 +91,7 @@
},
{
"BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_FAF_INSERTS",
"PerPkg": "1",
@@ -86,6 +100,7 @@
},
{
"BriefDescription": "Occupancy of the IRP FAF queue.",
+ "Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_FAF_OCCUPANCY",
"PerPkg": "1",
@@ -94,6 +109,7 @@
},
{
"BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_FAF_TRANSACTIONS",
"PerPkg": "1",
@@ -101,14 +117,17 @@
},
{
"BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.EVICTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
"PerPkg": "1",
@@ -117,78 +136,97 @@
},
{
"BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.FAST_REJ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "Counter": "0,1",
"EventCode": "0x1e",
"EventName": "UNC_I_MISC0.FAST_REQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.FAST_XFER",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "Counter": "0,1",
"EventCode": "0x1E",
"EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.LOST_FWD",
"PerPkg": "1",
@@ -198,8 +236,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x20",
@@ -207,8 +247,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Received Valid : Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x40",
@@ -216,8 +258,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_E",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of E Line : Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x4",
@@ -225,8 +269,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of I Line : Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x1",
@@ -234,8 +280,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_M",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of M Line : Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x8",
@@ -243,8 +291,10 @@
},
{
"BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "Counter": "0,1",
"EventCode": "0x1f",
"EventName": "UNC_I_MISC1.SLOW_S",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Slow Transfer of S Line : Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x2",
@@ -252,88 +302,110 @@
},
{
"BriefDescription": "P2P Requests",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_P2P_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "P2P Requests : P2P requests from the ITC",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Occupancy",
+ "Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_P2P_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "P2P Occupancy : P2P B & S Queue Occupancy",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : P2P completions",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : match if local only",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : match if local and target matches",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : P2P Message",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : P2P reads",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : Match if remote only",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : match if remote and target matches",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "P2P Transactions : P2P Writes",
+ "Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
"UMask": "0x7e",
@@ -341,8 +413,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
"UMask": "0x74",
@@ -350,8 +424,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
"UMask": "0x72",
@@ -359,6 +435,7 @@
},
{
"BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
"PerPkg": "1",
@@ -368,8 +445,10 @@
},
{
"BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
"UMask": "0x71",
@@ -377,64 +456,80 @@
},
{
"BriefDescription": "Snoop Responses : Hit E or S",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Hit I",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Hit M",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : Miss",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.MISS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpCode",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpData",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses : SnpInv",
+ "Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count : Atomic",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Inbound Transaction Count : Atomic : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks the number of atomic transactions",
"UMask": "0x10",
@@ -442,8 +537,10 @@
},
{
"BriefDescription": "Inbound Transaction Count : Other",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Inbound Transaction Count : Other : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks the number of 'other' kinds of transactions.",
"UMask": "0x20",
@@ -451,8 +548,10 @@
},
{
"BriefDescription": "Inbound Transaction Count : Writes",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
"UMask": "0x2",
@@ -460,6 +559,7 @@
},
{
"BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
@@ -469,134 +569,170 @@
},
{
"BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
"EventCode": "0x0B",
"EventName": "UNC_I_TxC_AK_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x05",
"EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x08",
"EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x06",
"EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x09",
"EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
"EventCode": "0x07",
"EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
"EventCode": "0x04",
"EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
"EventCode": "0x0A",
"EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Counter": "0,1",
"EventCode": "0x1C",
"EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0&AD1 both. Stalls on both AD0 and AD1 will count as 2",
"Unit": "IRP"
},
{
"BriefDescription": "No AD0 Egress Credits Stalls",
+ "Counter": "0,1",
"EventCode": "0x1A",
"EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No AD0 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No AD1 Egress Credits Stalls",
+ "Counter": "0,1",
"EventCode": "0x1B",
"EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No AD1 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
"EventCode": "0x1D",
"EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "No BL Egress Credit Stalls : Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0x0D",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
"EventCode": "0x0E",
"EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
"EventCode": "0x0C",
"EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Outbound Request Queue Occupancy : Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
"Unit": "IRP"
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -604,8 +740,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -613,8 +751,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -622,8 +762,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -631,8 +773,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -640,8 +784,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -649,8 +795,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -658,8 +806,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -667,8 +817,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -676,8 +828,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -685,8 +839,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -694,8 +850,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -703,8 +861,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -712,8 +872,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -721,8 +883,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -730,8 +894,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -739,8 +905,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -748,8 +916,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -757,8 +927,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -766,8 +938,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -775,8 +949,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -784,8 +960,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -793,8 +971,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -802,8 +982,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -811,8 +993,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -820,8 +1004,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -829,8 +1015,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -838,8 +1026,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -847,8 +1037,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -856,8 +1048,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -865,8 +1059,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -874,8 +1070,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -883,8 +1081,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -892,8 +1092,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -901,8 +1103,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -910,8 +1114,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -919,8 +1125,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -928,8 +1136,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -937,8 +1147,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -946,8 +1158,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -955,8 +1169,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8A",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -964,8 +1180,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -973,8 +1191,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -982,8 +1202,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8B",
"EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -991,8 +1213,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1000,8 +1224,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1009,8 +1235,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1018,8 +1246,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -1027,8 +1257,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -1036,8 +1268,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -1045,8 +1279,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -1054,8 +1290,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -1063,8 +1301,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1072,8 +1312,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1081,8 +1323,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1090,8 +1334,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -1099,8 +1345,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -1108,8 +1356,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -1117,8 +1367,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -1126,8 +1378,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -1135,8 +1389,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -1144,8 +1400,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -1153,8 +1411,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -1162,8 +1422,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -1171,8 +1433,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -1180,8 +1444,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -1189,8 +1455,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1198,8 +1466,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1207,8 +1477,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1216,8 +1488,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -1225,8 +1499,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -1234,8 +1510,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -1243,8 +1521,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -1252,8 +1532,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8C",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -1261,8 +1543,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -1270,8 +1554,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -1279,8 +1565,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8D",
"EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -1288,8 +1576,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -1297,8 +1587,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -1306,8 +1598,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -1315,8 +1609,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -1324,8 +1620,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -1333,8 +1631,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -1342,8 +1642,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -1351,8 +1653,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -1360,8 +1664,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -1369,8 +1675,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -1378,8 +1686,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -1387,44 +1697,54 @@
},
{
"BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M2M_BYPASS_M2M_EGRESS.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M2M_BYPASS_M2M_EGRESS.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M2M_CLOCKTICKS",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2M_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -1432,29 +1752,37 @@
},
{
"BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number of reads in which direct to core transaction was overridden",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
"UMask": "0x4",
@@ -1462,8 +1790,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
"UMask": "0x8",
@@ -1471,8 +1801,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
"UMask": "0x40",
@@ -1480,8 +1812,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
"UMask": "0x80",
@@ -1489,8 +1823,10 @@
},
{
"BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x2",
@@ -1498,8 +1834,10 @@
},
{
"BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xAF",
"EventName": "UNC_M2M_DISTRESS_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x1",
@@ -1507,8 +1845,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -1516,8 +1856,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xBA",
"EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -1525,8 +1867,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1534,8 +1878,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1543,8 +1889,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1552,8 +1900,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB6",
"EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1561,8 +1911,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1570,8 +1922,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1579,8 +1933,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1588,8 +1944,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xBB",
"EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1597,8 +1955,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1606,8 +1966,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1615,8 +1977,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1624,8 +1988,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7",
"EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1633,8 +1999,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -1642,8 +2010,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -1651,8 +2021,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -1660,8 +2032,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -1669,8 +2043,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -1678,8 +2054,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xB9",
"EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -1687,463 +2065,581 @@
},
{
"BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x704",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x104",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x140",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x102",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x101",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x204",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x240",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x202",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x201",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x740",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Critical Priority - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x702",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Reads Issued to iMC : Normal Priority - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x701",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : All Writes - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c10",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x410",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x401",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x404",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x402",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch0",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x408",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x810",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x801",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x804",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x802",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch1",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x808",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : From TGR - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c01",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c04",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c02",
"Unit": "M2M"
},
{
"BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1c08",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x64",
"EventName": "UNC_M2M_MIRR_WRQ_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x65",
"EventName": "UNC_M2M_MIRR_WRQ_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Number Packet Header Matches : MC Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M2M_PKT_MATCH.MC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Number Packet Header Matches : Mesh Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6B",
"EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA0_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA1_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_MISS_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_RSP_PDRESET",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA0_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA1_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_MISS_INVAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
"EventCode": "0x6E",
"EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_RSP_PDRESET",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6F",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6f",
"EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 0",
"UMask": "0x1",
@@ -2151,16 +2647,20 @@
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 1",
"UMask": "0x4",
@@ -2168,8 +2668,10 @@
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH2_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 2",
"UMask": "0x10",
@@ -2177,8 +2679,10 @@
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPTUPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - All Channels",
"UMask": "0x15",
@@ -2186,24 +2690,30 @@
},
{
"BriefDescription": "Demands Merged with CAMed Prefetches : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI- Ch 0",
"UMask": "0x1",
@@ -2211,16 +2721,20 @@
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI- Ch 1",
"UMask": "0x4",
@@ -2228,305 +2742,383 @@
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH2_XPTUPI",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPTUPI_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15",
"Unit": "M2M"
},
{
"BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.ERRORBLK_RxC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.NOT_PF_SAD_REGION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_SECURE_DROP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.RPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.STOP_B2B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.WPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x70",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.XPT_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.ERRORBLK_RxC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.NOT_PF_SAD_REGION",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_SECURE_DROP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.RPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.STOP_B2B",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.WPQ_PROXY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2M"
},
{
"BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.XPT_THRESH",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6D",
"EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x15",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x6A",
"EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": ": All Channels",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x7",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": ": Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Counter": "0,1,2,3",
"EventCode": "0x7A",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Counter": "0,1,2,3",
"EventCode": "0x7A",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Counter": "0,1,2,3",
"EventCode": "0x7A",
"EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -2534,8 +3126,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -2543,8 +3137,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -2552,8 +3148,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAC",
"EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -2561,8 +3159,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -2570,8 +3170,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -2579,8 +3181,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x10",
@@ -2588,8 +3192,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -2597,8 +3203,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAA",
"EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -2606,197 +3214,249 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xAD",
"EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xAB",
"EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "UNC_M2M_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_M2M_RxC_AD_PREF_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x5C",
"EventName": "UNC_M2M_RxC_AK_WR_CMP",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x11",
@@ -2804,8 +3464,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -2813,8 +3475,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -2822,8 +3486,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x44",
@@ -2831,8 +3497,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -2840,8 +3508,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -2849,8 +3519,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x11",
@@ -2858,8 +3530,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -2867,8 +3541,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -2876,8 +3552,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -2885,8 +3563,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x80",
@@ -2894,8 +3574,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x44",
@@ -2903,8 +3585,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -2912,8 +3596,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -2921,8 +3607,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE2",
"EventName": "UNC_M2M_RxR_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -2930,8 +3618,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -2939,8 +3629,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -2948,8 +3640,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -2957,8 +3651,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -2966,8 +3662,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -2975,8 +3673,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -2984,8 +3684,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -2993,8 +3695,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -3002,8 +3706,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M2M_RxR_CRD_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -3011,16 +3717,20 @@
},
{
"BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M2M_RxR_CRD_STARVED_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"Unit": "M2M"
},
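[Editor's note] The only changes in this hunk are the two fields added to every M2M event: "Counter": "0,1,2,3" names the PMON counters the event may be scheduled on, and "Experimental": "1" flags the event as experimental (perf's jevents tooling is the real consumer of both fields). As a purely illustrative sketch of the data shape, not the perf implementation, the following reads a hypothetical local copy of this JSON file, skips experimental events, and parses the counter list:

import json

# Hypothetical path to a local copy of the M2M uncore event file from this diff.
EVENTS_JSON = "uncore-interconnect.json"

def load_stable_events(path=EVENTS_JSON):
    """Return (name, counters) pairs for events not marked Experimental."""
    with open(path) as f:
        events = json.load(f)          # perf vendor files are a flat JSON array of event objects
    stable = []
    for ev in events:
        if ev.get("Experimental") == "1":
            continue                   # skip events flagged experimental in this update
        counters = [int(c) for c in ev.get("Counter", "").split(",") if c]
        stable.append((ev["EventName"], counters))
    return stable

if __name__ == "__main__":
    for name, counters in load_stable_events():
        print(f"{name}: schedulable on counters {counters}")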
{
"BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3028,8 +3738,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3037,8 +3749,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3046,8 +3760,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3055,8 +3771,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -3064,8 +3782,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3073,8 +3793,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -3082,8 +3804,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3091,8 +3815,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE1",
"EventName": "UNC_M2M_RxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3100,8 +3826,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -3109,8 +3837,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -3118,8 +3848,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -3127,8 +3859,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -3136,8 +3870,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -3145,8 +3881,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -3154,8 +3892,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x20",
@@ -3163,8 +3903,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -3172,8 +3914,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "UNC_M2M_RxR_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -3181,8 +3925,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3190,8 +3936,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3199,8 +3947,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3208,8 +3958,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3217,8 +3969,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3226,8 +3980,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3235,8 +3991,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -3244,8 +4002,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -3253,8 +4013,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3262,8 +4024,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3271,8 +4035,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3280,8 +4046,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3289,8 +4057,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3298,8 +4068,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3307,8 +4079,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -3316,8 +4090,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -3325,8 +4101,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3334,8 +4112,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3343,8 +4123,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3352,8 +4134,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3361,8 +4145,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3370,8 +4156,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3379,8 +4167,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -3388,8 +4178,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -3397,8 +4189,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3406,8 +4200,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3415,8 +4211,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3424,8 +4222,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -3433,8 +4233,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -3442,8 +4244,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -3451,8 +4255,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -3460,8 +4266,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xD6",
"EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -3469,8 +4277,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3478,8 +4288,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3487,8 +4299,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3496,8 +4310,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3505,8 +4321,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3514,8 +4332,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3523,8 +4343,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3532,8 +4354,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3541,8 +4365,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3550,8 +4376,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -3559,8 +4387,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -3568,8 +4398,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xD7",
"EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -3577,573 +4409,719 @@
},
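[Editor's note] The stall events above split each agent's transgress counters across two event codes: transgresses 0-7 sit on the base code (0xD0/0xD2/0xD4/0xD6) with umask bit 1 << n, while transgresses 8-10 move to the companion code (0xD1/0xD3/0xD5/0xD7) with the bit renumbered from zero. A small hedged helper capturing that mapping; the code pairs are generalized from the entries shown in this hunk, nothing beyond TGR10 is implied:

# (base code for TGR0-7, companion code for TGR8-10), per ring/agent, as listed above.
STALL_EVENT_CODES = {
    ("AD", 0): (0xD0, 0xD1),
    ("AD", 1): (0xD2, 0xD3),
    ("BL", 0): (0xD4, 0xD5),
    ("BL", 1): (0xD6, 0xD7),
}

def stall_event(ring, agent, tgr):
    """Return (event_code, umask) for UNC_M2M_STALL*_NO_TxR_HORZ_CRD_<ring>_AG<agent>.TGR<tgr>."""
    low, high = STALL_EVENT_CODES[(ring, agent)]
    if tgr <= 7:
        return low, 1 << tgr           # TGR0-7: umask 0x1..0x80 on the base event code
    return high, 1 << (tgr - 8)        # TGR8-10: umask 0x1/0x2/0x4 on the companion code

print([hex(x) for x in stall_event("BL", 1, 9)])   # matches ...BL_AG1_1.TGR9 above: 0xd7, 0x2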
{
"BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2M_TRACKER_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2M_TRACKER_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
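[Editor's note] Each entry pairs an EventCode with an optional UMask on the M2M PMU. Once these files are built into perf, events are normally selected by their EventName; the sketch below instead shows the equivalent raw event= / umask= terms, assuming the conventional Intel uncore PMON encoding (event select in the low byte, umask in the next byte) and a PMU named "uncore_m2m" (real systems may expose uncore_m2m_0, uncore_m2m_1, ...):

def m2m_perf_term(event_code, umask=None):
    """Build a perf event term string for an M2M PMON event.

    Assumes the conventional uncore encoding; entries without a UMask (for
    example UNC_M2M_TGR_AD_CREDITS above) simply omit the umask term.
    """
    parts = [f"event={event_code}"]
    if umask:
        parts.append(f"umask={umask}")
    return "uncore_m2m/" + ",".join(parts) + "/"

# UNC_M2M_TRACKER_OCCUPANCY.CH1 from the hunk above (EventCode 0x47, UMask 0x2):
print(m2m_perf_term("0x47", "0x2"))   # -> uncore_m2m/event=0x47,umask=0x2/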
{
"BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "Counter": "0,1,2,3",
"EventCode": "0x0d",
"EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x0e",
"EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x0c",
"EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x0b",
"EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x09",
"EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x0f",
"EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x0A",
"EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound Ring Transactions on AK : CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound Ring Transactions on AK : NDR Transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_M2M_TxC_AK.NDR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AKC Credits",
+ "Counter": "0,1,2,3",
"EventCode": "0x5F",
"EventName": "UNC_M2M_TxC_AKC_CREDITS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x88",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0xa0",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x90",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
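[Editor's note] A recurring convention in these entries is that an ".ALL"-style umask is the OR of its component umasks: TxC_AK_OCCUPANCY.ALL above is 0x3 = CMS0 0x1 | CMS1 0x2, and the RxR_* groups use 0x11 = 0x10 | 0x1 and 0x44 = 0x40 | 0x4 for "All == Credited + Uncredited". It is not universal (RxR_OCCUPANCY.BL_CRD earlier uses 0x20 against a BL_ALL of 0x44), so a check like the sketch below only makes sense per group; the umask values are copied from this hunk and the check asserts the documented convention, nothing about the hardware:

# Umask values copied from the entries above.
groups = {
    "UNC_M2M_TxC_AK_OCCUPANCY": {"all": 0x3,  "parts": [0x1, 0x2]},    # CMS0 | CMS1
    "UNC_M2M_RxR_INSERTS.AD":   {"all": 0x11, "parts": [0x10, 0x1]},   # Credited | Uncredited
    "UNC_M2M_RxR_INSERTS.BL":   {"all": 0x44, "parts": [0x40, 0x4]},   # Credited | Uncredited
}

for name, g in groups.items():
    combined = 0
    for part in g["parts"]:
        combined |= part
    assert combined == g["all"], f"{name}: 0x{combined:x} != 0x{g['all']:x}"
    print(f"{name}: ALL umask 0x{g['all']:x} == OR of parts")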
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Core",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4151,8 +5129,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4160,8 +5140,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4169,8 +5151,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4178,8 +5162,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4187,8 +5173,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4196,8 +5184,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4205,8 +5195,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4214,8 +5206,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4223,8 +5217,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4232,8 +5228,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x80",
@@ -4241,8 +5239,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4250,8 +5250,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4259,8 +5261,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4268,8 +5272,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -4277,8 +5283,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4286,8 +5294,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4295,8 +5305,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4304,8 +5316,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4313,8 +5327,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -4322,8 +5338,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4331,8 +5349,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4340,8 +5360,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4349,8 +5371,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4358,8 +5382,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4367,8 +5393,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4376,8 +5404,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4385,8 +5415,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4394,8 +5426,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -4403,8 +5437,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4412,8 +5448,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4421,8 +5459,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4430,8 +5470,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA3",
"EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4439,8 +5481,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4448,8 +5492,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4457,8 +5503,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4466,8 +5514,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4475,8 +5525,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -4484,8 +5536,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4493,8 +5547,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4502,8 +5558,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4511,8 +5569,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA1",
"EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4520,8 +5580,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4529,8 +5591,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x10",
@@ -4538,8 +5602,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -4547,8 +5613,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -4556,8 +5624,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x80",
@@ -4565,8 +5635,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4574,8 +5646,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -4583,8 +5657,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -4592,8 +5668,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA4",
"EventName": "UNC_M2M_TxR_HORZ_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -4601,8 +5679,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -4610,8 +5690,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -4619,8 +5701,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -4628,8 +5712,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -4637,8 +5723,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -4646,8 +5734,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -4655,8 +5745,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -4664,8 +5756,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -4673,8 +5767,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -4682,8 +5778,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x1",
@@ -4691,8 +5789,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -4700,8 +5800,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -4709,8 +5811,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x80",
@@ -4718,8 +5822,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x4",
@@ -4727,8 +5833,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -4736,8 +5844,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xA5",
"EventName": "UNC_M2M_TxR_HORZ_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -4745,8 +5855,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4754,8 +5866,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4763,8 +5877,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4772,8 +5888,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4781,8 +5899,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4790,8 +5910,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -4799,8 +5921,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4808,8 +5932,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -4817,8 +5943,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -4826,8 +5954,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -4835,8 +5965,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9D",
"EventName": "UNC_M2M_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -4844,8 +5976,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -4853,8 +5987,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9E",
"EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -4862,8 +5998,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4871,8 +6009,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -4880,8 +6020,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4889,8 +6031,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -4898,8 +6042,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -4907,8 +6053,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -4916,8 +6064,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -4925,8 +6075,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4934,8 +6086,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4943,8 +6097,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -4952,8 +6108,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -4961,8 +6119,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -4970,8 +6130,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -4979,8 +6141,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -4988,8 +6152,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -4997,8 +6163,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5006,8 +6174,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5015,8 +6185,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5024,8 +6196,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5033,8 +6207,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5042,8 +6218,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5051,8 +6229,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5060,8 +6240,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5069,8 +6251,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5078,8 +6262,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2M_TxR_VERT_INSERTS0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5087,8 +6273,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5096,8 +6284,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5105,8 +6295,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -5114,8 +6306,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -5123,8 +6317,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -5132,8 +6328,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -5141,8 +6339,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -5150,8 +6350,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -5159,8 +6361,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2M_TxR_VERT_NACK0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -5168,8 +6372,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -5177,8 +6383,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -5186,8 +6394,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5195,8 +6405,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -5204,8 +6416,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5213,8 +6427,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -5222,8 +6438,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -5231,8 +6449,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -5240,8 +6460,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -5249,8 +6471,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -5258,8 +6482,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -5267,8 +6493,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -5276,8 +6504,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -5285,8 +6515,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -5294,8 +6526,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -5303,8 +6537,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -5312,8 +6548,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -5321,8 +6559,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9A",
"EventName": "UNC_M2M_TxR_VERT_STARVED0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -5330,8 +6570,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -5339,8 +6581,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -5348,8 +6592,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9B",
"EventName": "UNC_M2M_TxR_VERT_STARVED1.TGC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -5357,8 +6603,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5366,8 +6614,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5375,8 +6625,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5384,8 +6636,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5393,8 +6647,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5402,8 +6658,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5411,8 +6669,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5420,8 +6680,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5429,8 +6691,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5438,8 +6702,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5447,8 +6713,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5456,8 +6724,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5465,8 +6735,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5474,8 +6746,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5483,8 +6757,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5492,8 +6768,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5501,8 +6779,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -5510,8 +6790,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xB3",
"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -5519,8 +6801,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5528,8 +6812,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5537,8 +6823,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5546,8 +6834,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xB5",
"EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5555,254 +6845,317 @@
},
{
"BriefDescription": "WPQ Flush : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "WPQ Flush : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x58",
"EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4D",
"EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x4E",
"EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WR_TRACKER_FULL.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WR_TRACKER_FULL.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Full : Mirror",
+ "Counter": "0,1,2,3",
"EventCode": "0x4A",
"EventName": "UNC_M2M_WR_TRACKER_FULL.MIRR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x4B",
"EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x62",
"EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy : Mirror",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_NONTGR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_PWR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5E",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2M"
},
{
"BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x5D",
"EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2M"
},
{
"BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_U_CLOCKTICKS",
"PerPkg": "1",
@@ -5810,16 +7163,20 @@
},
{
"BriefDescription": "Message Received : Doorbell",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "Message Received : Interrupt",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : Interrupt : Interrupts",
"UMask": "0x10",
@@ -5827,8 +7184,10 @@
},
{
"BriefDescription": "Message Received : IPI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
"UMask": "0x4",
@@ -5836,8 +7195,10 @@
},
{
"BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
"UMask": "0x2",
@@ -5845,8 +7206,10 @@
},
{
"BriefDescription": "Message Received : VLW",
+ "Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
"UMask": "0x1",
@@ -5854,128 +7217,160 @@
},
{
"BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
"EventCode": "0x44",
"EventName": "UNC_U_LOCK_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IDI Lock/SplitLock Cycles : Number of times an IDI Lock/SplitLock sequence was started",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Counter": "0,1",
"EventCode": "0x4D",
"EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Counter": "0,1",
"EventCode": "0x4F",
"EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Counter": "0,1",
"EventCode": "0x4F",
"EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles PHOLD Assert to Ack : Assert to ACK : PHOLD cycles.",
"UMask": "0x1",
@@ -5983,32 +7378,40 @@
},
{
"BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "RACU Request",
+ "Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "RACU Request : Number outstanding register requests within message channel tracker",
"Unit": "UBOX"
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json
index de156e499f56..dff3c5a9f0d7 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "LLC_MISSES.PCIE_READ",
"FCMask": "0x07",
@@ -16,6 +17,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "LLC_MISSES.PCIE_WRITE",
"FCMask": "0x07",
@@ -31,70 +33,87 @@
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "1",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "2",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "3",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x22",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x23",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "5",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x24",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "6",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x25",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "7",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x26",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "8",
"EventCode": "0xff",
"EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x27",
"Unit": "iio_free_running"
},
{
"BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_IIO_CLOCKTICKS",
"PerPkg": "1",
@@ -102,6 +121,7 @@
},
{
"BriefDescription": "Free running counter that increments for IIO clocktick",
+ "Counter": "0",
"EventCode": "0xff",
"EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
"PerPkg": "1",
@@ -111,8 +131,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts : All Ports",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL",
+ "Experimental": "1",
"FCMask": "0x04",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -121,6 +143,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
"FCMask": "0x04",
@@ -132,6 +155,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
"FCMask": "0x04",
@@ -143,6 +167,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
"FCMask": "0x04",
@@ -154,6 +179,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
"FCMask": "0x04",
@@ -165,6 +191,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
"FCMask": "0x04",
@@ -176,6 +203,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
"FCMask": "0x04",
@@ -187,6 +215,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
"FCMask": "0x04",
@@ -198,6 +227,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
"FCMask": "0x04",
@@ -209,6 +239,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
"FCMask": "0x04",
@@ -220,8 +251,10 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
"EventCode": "0xD5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL",
+ "Experimental": "1",
"FCMask": "0x04",
"PerPkg": "1",
"PublicDescription": "PCIe Completion Buffer Occupancy : Part 0-7",
@@ -230,6 +263,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
"FCMask": "0x04",
@@ -240,6 +274,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
"FCMask": "0x04",
@@ -250,6 +285,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
"FCMask": "0x04",
@@ -260,6 +296,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
"FCMask": "0x04",
@@ -270,6 +307,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
"FCMask": "0x04",
@@ -280,6 +318,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
"FCMask": "0x04",
@@ -290,6 +329,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
"FCMask": "0x04",
@@ -300,6 +340,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
"FCMask": "0x04",
@@ -310,6 +351,7 @@
},
{
"BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
+ "Counter": "2,3",
"EventCode": "0xd5",
"EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
"FCMask": "0x04",
@@ -320,8 +362,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -331,8 +375,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -342,8 +388,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -353,8 +401,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -364,8 +414,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -375,8 +427,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -386,8 +440,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -397,8 +453,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -408,8 +466,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -419,8 +479,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -430,8 +492,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -441,8 +505,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -452,8 +518,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -463,8 +531,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -474,8 +544,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -485,8 +557,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -496,8 +570,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -507,8 +583,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -518,8 +596,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -529,8 +609,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -540,8 +622,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -551,8 +635,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -562,8 +648,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -573,8 +661,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -584,8 +674,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -595,8 +687,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -606,8 +700,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -617,8 +713,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -628,8 +726,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -639,8 +739,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -650,8 +752,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -661,8 +765,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -672,8 +778,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -683,8 +791,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -694,8 +804,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -705,8 +817,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -716,8 +830,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -727,8 +843,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -738,8 +856,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -749,8 +869,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -760,8 +882,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -771,8 +895,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -782,6 +908,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -793,6 +920,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -804,6 +932,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -815,6 +944,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -826,6 +956,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -837,6 +968,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -848,6 +980,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -859,6 +992,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
"EventCode": "0xc0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -870,8 +1004,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -881,8 +1017,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -892,6 +1030,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -903,6 +1042,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -914,6 +1054,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -925,6 +1066,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -936,6 +1078,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -947,6 +1090,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -958,6 +1102,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -969,6 +1114,7 @@
},
{
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -980,8 +1126,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -991,8 +1139,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1002,8 +1152,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1013,8 +1165,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1024,8 +1178,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1035,8 +1191,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1046,8 +1204,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1057,8 +1217,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1068,8 +1230,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1079,8 +1243,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1090,8 +1256,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1101,8 +1269,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1112,8 +1282,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1123,8 +1295,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1134,8 +1308,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1145,8 +1321,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1156,8 +1334,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1167,8 +1347,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1178,8 +1360,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1189,8 +1373,10 @@
},
{
"BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "2,3",
"EventCode": "0xC0",
"EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1200,8 +1386,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1211,8 +1399,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1222,8 +1412,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1233,8 +1425,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1244,8 +1438,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1255,8 +1451,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1266,8 +1464,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1277,8 +1477,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1288,8 +1490,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1299,8 +1503,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1310,8 +1516,10 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1321,8 +1529,10 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1332,6 +1542,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
"FCMask": "0x07",
@@ -1343,6 +1554,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
"FCMask": "0x07",
@@ -1354,6 +1566,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
"FCMask": "0x07",
@@ -1365,6 +1578,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
"FCMask": "0x07",
@@ -1376,6 +1590,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
"FCMask": "0x07",
@@ -1387,6 +1602,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
"FCMask": "0x07",
@@ -1398,6 +1614,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
"FCMask": "0x07",
@@ -1409,6 +1626,7 @@
},
{
"BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
"FCMask": "0x07",
@@ -1420,8 +1638,10 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1431,8 +1651,10 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1442,6 +1664,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -1453,6 +1676,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -1464,6 +1688,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -1475,6 +1700,7 @@
},
{
"BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -1486,6 +1712,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -1497,6 +1724,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -1508,6 +1736,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -1519,6 +1748,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -1530,8 +1760,10 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1541,8 +1773,10 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1552,6 +1786,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -1563,6 +1798,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -1574,6 +1810,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -1585,6 +1822,7 @@
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -1596,6 +1834,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -1607,6 +1846,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -1618,6 +1858,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -1629,6 +1870,7 @@
},
{
"BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -1640,8 +1882,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1651,8 +1895,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1662,8 +1908,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1673,8 +1921,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1684,8 +1934,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1695,8 +1947,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1706,8 +1960,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1717,8 +1973,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1728,8 +1986,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1739,8 +1999,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1750,8 +2012,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1761,8 +2025,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1772,8 +2038,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1783,8 +2051,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1794,8 +2064,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1805,8 +2077,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1816,8 +2090,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1827,8 +2103,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1838,8 +2116,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1849,8 +2129,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1860,8 +2142,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -1871,8 +2155,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -1882,8 +2168,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -1893,8 +2181,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -1904,8 +2194,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -1915,8 +2207,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -1926,8 +2220,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -1937,8 +2233,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -1948,8 +2246,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -1959,8 +2259,10 @@
},
{
"BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -1970,8 +2272,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -1981,8 +2285,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -1992,8 +2298,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2003,8 +2311,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2014,8 +2324,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2025,8 +2337,10 @@
},
{
"BriefDescription": "Incoming arbitration requests : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2036,8 +2350,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2047,8 +2363,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2058,8 +2376,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2069,8 +2389,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2080,8 +2402,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2091,8 +2415,10 @@
},
{
"BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2102,8 +2428,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 1G Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
"UMask": "0x10",
@@ -2111,8 +2439,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 2M Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
"UMask": "0x8",
@@ -2120,8 +2450,10 @@
},
{
"BriefDescription": ": IOTLB Hits to a 4K Page",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
"UMask": "0x4",
@@ -2129,8 +2461,10 @@
},
{
"BriefDescription": ": IOTLB lookups all",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB lookups all : Some transactions have to look up IOTLB multiple times. Counts every time a request looks up IOTLB.",
"UMask": "0x2",
@@ -2138,8 +2472,10 @@
},
{
"BriefDescription": ": Context cache hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Context cache hits : Counts each time a first look up of the transaction hits the RCC.",
"UMask": "0x80",
@@ -2147,8 +2483,10 @@
},
{
"BriefDescription": ": Context cache lookups",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Context cache lookups : Counts each time a transaction looks up root context cache.",
"UMask": "0x40",
@@ -2156,8 +2494,10 @@
},
{
"BriefDescription": ": IOTLB lookups first",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB lookups first : Some transactions have to look up IOTLB multiple times. Counts the first time a request looks up IOTLB.",
"UMask": "0x1",
@@ -2165,8 +2505,10 @@
},
{
"BriefDescription": ": IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.MISSES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOTLB Fills (same as IOTLB miss) : When a transaction misses IOTLB, it does a page walk to look up memory and bring in the relevant page translation. Counts when this page translation is written to IOTLB.",
"UMask": "0x20",
@@ -2174,8 +2516,10 @@
},
{
"BriefDescription": ": Cycles PWT full",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.CYC_PWT_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Cycles PWT full : Counts cycles the IOMMU has reached its maximum limit for outstanding page walks.",
"UMask": "0x80",
@@ -2183,8 +2527,10 @@
},
{
"BriefDescription": ": IOMMU memory access",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": IOMMU memory access : IOMMU sends out memory fetches when it misses the cache look up which is indicated by this signal. M2IOSF only uses low priority channel",
"UMask": "0x40",
@@ -2192,8 +2538,10 @@
},
{
"BriefDescription": ": PWC Hit to a 1G page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
"UMask": "0x8",
@@ -2201,8 +2549,10 @@
},
{
"BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
"UMask": "0x4",
@@ -2210,8 +2560,10 @@
},
{
"BriefDescription": ": PWC Hit to a 4K page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_4K_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWC Hit to a 4K page : Counts each time a transaction's first look up hits the SLPWC at the 4K level",
"UMask": "0x2",
@@ -2219,8 +2571,10 @@
},
{
"BriefDescription": ": PWT Hit to a 256T page",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PWT Hit to a 256T page : Counts each time a transaction's first look up hits the SLPWC at the 512G level",
"UMask": "0x10",
@@ -2228,8 +2582,10 @@
},
{
"BriefDescription": ": PageWalk cache fill",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PageWalk cache fill : When a transaction misses SLPWC, it does a page walk to look up memory and bring in the relevant page translation. When this page translation is written to SLPWC, ObsPwcFillValid_nnnH is asserted.",
"UMask": "0x20",
@@ -2237,8 +2593,10 @@
},
{
"BriefDescription": ": PageWalk cache lookup",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": PageWalk cache lookup : Counts each time a transaction looks up second level page walk cache.",
"UMask": "0x1",
@@ -2246,8 +2604,10 @@
},
{
"BriefDescription": ": Interrupt Entry cache hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Interrupt Entry cache hit : Counts each time a transaction's first look up hits the IEC.",
"UMask": "0x80",
@@ -2255,8 +2615,10 @@
},
{
"BriefDescription": ": Interrupt Entry cache lookup",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Interrupt Entry cache lookup : Counts the number of transaction looks up that interrupt remapping cache.",
"UMask": "0x40",
@@ -2264,8 +2626,10 @@
},
{
"BriefDescription": ": Device-selective Context cache invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DEVICE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Device-selective Context cache invalidation cycles : Counts number of Device selective context cache invalidation events",
"UMask": "0x20",
@@ -2273,8 +2637,10 @@
},
{
"BriefDescription": ": Domain-selective Context cache invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DOMAIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Domain-selective Context cache invalidation cycles : Counts number of Domain selective context cache invalidation events",
"UMask": "0x10",
@@ -2282,8 +2648,10 @@
},
{
"BriefDescription": ": Context cache global invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_GBL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Context cache global invalidation cycles : Counts number of Context Cache global invalidation events",
"UMask": "0x8",
@@ -2291,8 +2659,10 @@
},
{
"BriefDescription": ": Domain-selective IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_DOMAIN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Domain-selective IOTLB invalidation cycles : Counts number of Domain selective invalidation events",
"UMask": "0x2",
@@ -2300,8 +2670,10 @@
},
{
"BriefDescription": ": Global IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_GBL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Global IOTLB invalidation cycles : Indicates that IOMMU is doing global invalidation.",
"UMask": "0x1",
@@ -2309,8 +2681,10 @@
},
{
"BriefDescription": ": Page-selective IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PAGE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": ": Page-selective IOTLB invalidation cycles : Counts number of Page-selective within Domain Invalidation events",
"UMask": "0x4",
@@ -2318,8 +2692,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus : Asserted if all bits specified by mask match",
"UMask": "0x1",
@@ -2327,8 +2703,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if all bits specified by mask match",
"UMask": "0x8",
@@ -2336,8 +2714,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if all bits specified by mask match",
"UMask": "0x4",
@@ -2345,8 +2725,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : PCIE bus : Asserted if all bits specified by mask match",
"UMask": "0x2",
@@ -2354,8 +2736,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if all bits specified by mask match",
"UMask": "0x10",
@@ -2363,8 +2747,10 @@
},
{
"BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x02",
"EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if all bits specified by mask match",
"UMask": "0x20",
@@ -2372,8 +2758,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus : Asserted if any bits specified by mask match",
"UMask": "0x1",
@@ -2381,8 +2769,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if any bits specified by mask match",
"UMask": "0x8",
@@ -2390,8 +2780,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if any bits specified by mask match",
"UMask": "0x4",
@@ -2399,8 +2791,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : PCIE bus : Asserted if any bits specified by mask match",
"UMask": "0x2",
@@ -2408,8 +2802,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if any bits specified by mask match",
"UMask": "0x10",
@@ -2417,8 +2813,10 @@
},
{
"BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
"EventCode": "0x03",
"EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if any bits specified by mask match",
"UMask": "0x20",
@@ -2426,15 +2824,19 @@
},
{
"BriefDescription": "Counting disabled",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_IIO_NOTHING",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "Occupancy of outbound request queue : To device",
+ "Counter": "2,3",
"EventCode": "0xC5",
"EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2444,8 +2846,10 @@
},
{
"BriefDescription": ": Passing data to be written",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2455,8 +2859,10 @@
},
{
"BriefDescription": ": Issuing final read or write of line",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2465,8 +2871,10 @@
},
{
"BriefDescription": ": Processing response from IOMMU",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2475,8 +2883,10 @@
},
{
"BriefDescription": ": Issuing to IOMMU",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2485,8 +2895,10 @@
},
{
"BriefDescription": ": Request Ownership",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2496,8 +2908,10 @@
},
{
"BriefDescription": ": Writing line",
+ "Counter": "2,3",
"EventCode": "0x88",
"EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2507,8 +2921,10 @@
},
{
"BriefDescription": "Number requests sent to PCIe from main die : From ITC",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UNC_IIO_NUM_REQ_FROM_CPU.ITC",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2518,8 +2934,10 @@
},
{
"BriefDescription": "Number requests sent to PCIe from main die : Completion allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UNC_IIO_NUM_REQ_FROM_CPU.PREALLOC",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2528,8 +2946,10 @@
},
{
"BriefDescription": "Number requests PCIe makes of the main die : Drop request",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU.ALL.DROP",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2539,6 +2959,7 @@
},
{
"BriefDescription": "Number requests PCIe makes of the main die : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
"FCMask": "0x07",
@@ -2550,8 +2971,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2560,8 +2983,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2570,8 +2995,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2580,8 +3007,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2590,8 +3019,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2600,8 +3031,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2610,8 +3043,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2620,8 +3055,10 @@
},
{
"BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "Counter": "0,1,2,3",
"EventCode": "0x8E",
"EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2630,15 +3067,19 @@
},
{
"BriefDescription": "ITC address map 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8F",
"EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "IIO"
},
{
"BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2648,8 +3089,10 @@
},
{
"BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2659,16 +3102,20 @@
},
{
"BriefDescription": "PWT occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "PWT occupancy : Indicates how many page walks are outstanding at any point in time.",
"Unit": "IIO"
},
{
"BriefDescription": "PCIe Request - cacheline complete : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2678,8 +3125,10 @@
},
{
"BriefDescription": "PCIe Request - cacheline complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2689,8 +3138,10 @@
},
{
"BriefDescription": "PCIe Request - cacheline complete : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2700,8 +3151,10 @@
},
{
"BriefDescription": "PCIe Request - cacheline complete : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2711,8 +3164,10 @@
},
{
"BriefDescription": "PCIe Request complete : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2722,8 +3177,10 @@
},
{
"BriefDescription": "PCIe Request complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2733,8 +3190,10 @@
},
{
"BriefDescription": "PCIe Request complete : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2744,8 +3203,10 @@
},
{
"BriefDescription": "PCIe Request complete : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2755,8 +3216,10 @@
},
{
"BriefDescription": "PCIe Request complete : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2766,8 +3229,10 @@
},
{
"BriefDescription": "PCIe Request complete : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2777,8 +3242,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2788,8 +3255,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2799,8 +3268,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2810,8 +3281,10 @@
},
{
"BriefDescription": "PCIe Request - pass complete : Writing line",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0xFF",
@@ -2821,16 +3294,20 @@
},
{
"BriefDescription": "Symbol Times on Link",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_IIO_SYMBOL_TIMES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Symbol Times on Link : Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS",
"Unit": "IIO"
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -2840,8 +3317,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -2851,8 +3330,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -2862,8 +3343,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -2873,8 +3356,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -2884,8 +3369,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -2895,8 +3382,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -2906,8 +3395,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -2917,8 +3408,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -2928,8 +3421,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -2939,8 +3434,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -2950,8 +3447,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -2961,8 +3460,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -2972,8 +3473,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -2983,8 +3486,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -2994,8 +3499,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3005,8 +3512,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3016,8 +3525,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3027,8 +3538,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3038,8 +3551,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3049,8 +3564,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3060,8 +3577,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3071,8 +3590,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3082,8 +3603,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3093,8 +3616,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3104,8 +3629,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3115,8 +3642,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3126,8 +3655,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3137,8 +3668,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3148,8 +3681,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3159,8 +3694,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3170,8 +3707,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3181,8 +3720,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3192,8 +3733,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3203,8 +3746,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3214,8 +3759,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3225,8 +3772,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3236,8 +3785,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3247,8 +3798,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3258,8 +3811,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3269,8 +3824,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3280,8 +3837,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3291,6 +3850,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -3302,6 +3862,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -3313,6 +3874,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -3324,6 +3886,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -3335,6 +3898,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -3346,6 +3910,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -3357,6 +3922,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -3368,6 +3934,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -3379,8 +3946,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3390,8 +3959,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3401,6 +3972,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -3412,6 +3984,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -3423,6 +3996,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -3434,6 +4008,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -3445,6 +4020,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -3456,6 +4032,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -3467,6 +4044,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -3478,6 +4056,7 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
"EventCode": "0xc1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -3489,8 +4068,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3500,8 +4081,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3511,8 +4094,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3522,8 +4107,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3533,8 +4120,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3544,8 +4133,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3555,8 +4146,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3566,8 +4159,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3577,8 +4172,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3588,8 +4185,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3599,8 +4198,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3610,8 +4211,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3621,8 +4224,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3632,8 +4237,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3643,8 +4250,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3654,8 +4263,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3665,8 +4276,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3676,8 +4289,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3687,8 +4302,10 @@
},
{
"BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "Counter": "0,1,2,3",
"EventCode": "0xC1",
"EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3698,8 +4315,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3709,8 +4328,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3720,8 +4341,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -3731,8 +4354,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -3742,8 +4367,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -3753,8 +4380,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -3764,8 +4393,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -3775,8 +4406,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -3786,8 +4419,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -3797,8 +4432,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -3808,8 +4445,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3819,8 +4458,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3830,6 +4471,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
"FCMask": "0x07",
@@ -3841,6 +4483,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
"FCMask": "0x07",
@@ -3852,6 +4495,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
"FCMask": "0x07",
@@ -3863,6 +4507,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
"FCMask": "0x07",
@@ -3874,6 +4519,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
"FCMask": "0x07",
@@ -3885,6 +4531,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
"FCMask": "0x07",
@@ -3896,6 +4543,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
"FCMask": "0x07",
@@ -3907,6 +4555,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
"FCMask": "0x07",
@@ -3918,8 +4567,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -3929,8 +4580,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -3940,6 +4593,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
@@ -3951,6 +4605,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
"FCMask": "0x07",
@@ -3962,6 +4617,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
"FCMask": "0x07",
@@ -3973,6 +4629,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
"FCMask": "0x07",
@@ -3984,6 +4641,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
"FCMask": "0x07",
@@ -3995,6 +4653,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
"FCMask": "0x07",
@@ -4006,6 +4665,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
"FCMask": "0x07",
@@ -4017,6 +4677,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
"FCMask": "0x07",
@@ -4028,8 +4689,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -4039,8 +4702,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -4050,6 +4715,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
@@ -4061,6 +4727,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
"FCMask": "0x07",
@@ -4072,6 +4739,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
"FCMask": "0x07",
@@ -4083,6 +4751,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
"FCMask": "0x07",
@@ -4094,6 +4763,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
"FCMask": "0x07",
@@ -4105,6 +4775,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
"FCMask": "0x07",
@@ -4116,6 +4787,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
"FCMask": "0x07",
@@ -4127,6 +4799,7 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
"FCMask": "0x07",
@@ -4138,8 +4811,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -4149,8 +4824,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -4160,8 +4837,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -4171,8 +4850,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -4182,8 +4863,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -4193,8 +4876,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -4204,8 +4889,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4215,8 +4902,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4226,8 +4915,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -4237,8 +4928,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -4248,8 +4941,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -4259,8 +4954,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -4270,8 +4967,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -4281,8 +4980,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -4292,8 +4993,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -4303,8 +5006,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -4314,8 +5019,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4325,8 +5032,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4336,8 +5045,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -4347,8 +5058,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -4358,8 +5071,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x100",
@@ -4369,8 +5084,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x200",
@@ -4380,8 +5097,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
@@ -4391,8 +5110,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
@@ -4402,8 +5123,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
@@ -4413,8 +5136,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
@@ -4424,8 +5149,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x10",
@@ -4435,8 +5162,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x20",
@@ -4446,8 +5175,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x40",
@@ -4457,8 +5188,10 @@
},
{
"BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "Experimental": "1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x80",
@@ -4468,8 +5201,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4477,8 +5212,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4486,8 +5223,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4495,8 +5234,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4504,8 +5245,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4513,8 +5256,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4522,8 +5267,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -4531,8 +5278,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -4540,8 +5289,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4549,8 +5300,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4558,8 +5311,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4567,8 +5322,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4576,8 +5333,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4585,8 +5344,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4594,8 +5355,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4603,8 +5366,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4612,8 +5377,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4621,8 +5388,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -4630,8 +5399,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -4639,8 +5410,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4648,8 +5421,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4657,8 +5432,10 @@
},
{
"BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4666,8 +5443,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4675,8 +5454,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4684,8 +5465,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4693,8 +5476,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4702,8 +5487,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4711,8 +5498,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4720,8 +5509,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -4729,8 +5520,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -4738,8 +5531,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4747,8 +5542,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4756,8 +5553,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4765,8 +5564,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4774,8 +5575,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4783,8 +5586,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4792,8 +5597,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4801,8 +5608,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -4810,8 +5619,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -4819,8 +5630,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -4828,8 +5641,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8a",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -4837,8 +5652,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8b",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4846,8 +5663,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8b",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4855,8 +5674,10 @@
},
{
"BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8b",
"EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4864,8 +5685,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4873,8 +5696,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4882,8 +5707,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4891,8 +5718,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -4900,8 +5729,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -4909,8 +5740,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -4918,8 +5751,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -4927,8 +5762,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -4936,8 +5773,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -4945,8 +5784,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -4954,8 +5795,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -4963,8 +5806,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -4972,8 +5817,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -4981,8 +5828,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -4990,8 +5839,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -4999,8 +5850,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -5008,8 +5861,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -5017,8 +5872,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -5026,8 +5883,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -5035,8 +5894,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -5044,8 +5905,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -5053,8 +5916,10 @@
},
{
"BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -5062,8 +5927,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -5071,8 +5938,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -5080,8 +5949,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -5089,8 +5960,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x8",
@@ -5098,8 +5971,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x10",
@@ -5107,8 +5982,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x20",
@@ -5116,8 +5993,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x40",
@@ -5125,8 +6004,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8c",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x80",
@@ -5134,8 +6015,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8d",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x4",
@@ -5143,8 +6026,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8d",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x1",
@@ -5152,8 +6037,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8d",
"EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
"UMask": "0x2",
@@ -5161,8 +6048,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -5170,8 +6059,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -5179,8 +6070,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -5188,8 +6081,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x8",
@@ -5197,8 +6092,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x10",
@@ -5206,8 +6103,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x20",
@@ -5215,8 +6114,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x40",
@@ -5224,8 +6125,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0x8e",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x80",
@@ -5233,8 +6136,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0x8f",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x4",
@@ -5242,8 +6147,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0x8f",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x1",
@@ -5251,8 +6158,10 @@
},
{
"BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0x8f",
"EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
"UMask": "0x2",
@@ -5260,6 +6169,7 @@
},
{
"BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M2P_CLOCKTICKS",
"PerPkg": "1",
@@ -5267,6 +6177,7 @@
},
{
"BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
"EventCode": "0xc0",
"EventName": "UNC_M2P_CMS_CLOCKTICKS",
"PerPkg": "1",
@@ -5274,8 +6185,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_LOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
"UMask": "0x4",
@@ -5283,8 +6196,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
"UMask": "0x8",
@@ -5292,8 +6207,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
"UMask": "0x40",
@@ -5301,8 +6218,10 @@
},
{
"BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
"UMask": "0x80",
@@ -5310,8 +6229,10 @@
},
{
"BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.HORZ",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x2",
@@ -5319,8 +6240,10 @@
},
{
"BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
"EventCode": "0xaf",
"EventName": "UNC_M2P_DISTRESS_ASSERTED.VERT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
"UMask": "0x1",
@@ -5328,8 +6251,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x4",
@@ -5337,8 +6262,10 @@
},
{
"BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xba",
"EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
"UMask": "0x1",
@@ -5346,8 +6273,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb6",
"EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5355,8 +6284,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb6",
"EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5364,8 +6295,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb6",
"EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5373,8 +6306,10 @@
},
{
"BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb6",
"EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5382,8 +6317,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xbb",
"EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5391,8 +6328,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xbb",
"EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5400,8 +6339,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xbb",
"EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5409,8 +6350,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xbb",
"EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5418,8 +6361,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb7",
"EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5427,8 +6372,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb7",
"EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5436,8 +6383,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb7",
"EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5445,8 +6394,10 @@
},
{
"BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb7",
"EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5454,8 +6405,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -5463,8 +6416,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -5472,8 +6427,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -5481,8 +6438,10 @@
},
{
"BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb8",
"EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -5490,8 +6449,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
"EventCode": "0xb9",
"EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.LEFT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -5499,8 +6460,10 @@
},
{
"BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
"EventCode": "0xb9",
"EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.RIGHT",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -5508,8 +6471,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x1",
@@ -5517,8 +6482,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x2",
@@ -5526,8 +6493,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x4",
@@ -5535,8 +6504,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x8",
@@ -5544,8 +6515,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
"UMask": "0x10",
@@ -5553,8 +6526,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x33",
"EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -5562,8 +6537,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the DRS message class.",
"UMask": "0x8",
@@ -5571,8 +6548,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCB message class.",
"UMask": "0x10",
@@ -5580,8 +6559,10 @@
},
{
"BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -5589,8 +6570,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x1",
@@ -5598,8 +6581,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
"UMask": "0x2",
@@ -5607,8 +6592,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x4",
@@ -5616,8 +6603,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
"UMask": "0x8",
@@ -5625,8 +6614,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
"UMask": "0x10",
@@ -5634,8 +6625,10 @@
},
{
"BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
"UMask": "0x20",
@@ -5643,648 +6636,810 @@
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x40",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x40",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x80",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x4b",
"EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : All",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : All",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Shared Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xac",
"EventName": "UNC_M2P_RING_BOUNCES_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -6292,8 +7447,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xac",
"EventName": "UNC_M2P_RING_BOUNCES_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -6301,8 +7458,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xac",
"EventName": "UNC_M2P_RING_BOUNCES_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -6310,8 +7469,10 @@
},
{
"BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xac",
"EventName": "UNC_M2P_RING_BOUNCES_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -6319,8 +7480,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x1",
@@ -6328,8 +7491,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x2",
@@ -6337,8 +7502,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x10",
@@ -6346,8 +7513,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x4",
@@ -6355,8 +7524,10 @@
},
{
"BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xaa",
"EventName": "UNC_M2P_RING_BOUNCES_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
"UMask": "0x8",
@@ -6364,95 +7535,119 @@
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x20",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xad",
"EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AK",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AKC",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.BL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "UNC_M2P_RING_SINK_STARVED_VERT.IV",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
"EventCode": "0xae",
"EventName": "UNC_M2P_RING_SRC_THRTL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x80",
@@ -6460,8 +7655,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x1",
@@ -6469,8 +7666,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x2",
@@ -6478,8 +7677,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x4",
@@ -6487,8 +7688,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x20",
@@ -6496,8 +7699,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
"UMask": "0x40",
@@ -6505,8 +7710,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x80",
@@ -6514,8 +7721,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x1",
@@ -6523,8 +7732,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x2",
@@ -6532,8 +7743,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x4",
@@ -6541,8 +7754,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x20",
@@ -6550,8 +7765,10 @@
},
{
"BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
"UMask": "0x40",
@@ -6559,8 +7776,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x11",
@@ -6568,8 +7787,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x10",
@@ -6577,8 +7798,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x1",
@@ -6586,8 +7809,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
"UMask": "0x44",
@@ -6595,8 +7820,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x40",
@@ -6604,8 +7831,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe5",
"EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
"UMask": "0x4",
@@ -6613,8 +7842,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x11",
@@ -6622,8 +7853,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x10",
@@ -6631,8 +7864,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x1",
@@ -6640,8 +7875,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
"UMask": "0x2",
@@ -6649,8 +7886,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x80",
@@ -6658,8 +7897,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
"UMask": "0x44",
@@ -6667,8 +7908,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
"UMask": "0x40",
@@ -6676,8 +7919,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
"UMask": "0x4",
@@ -6685,8 +7930,10 @@
},
{
"BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xe2",
"EventName": "UNC_M2P_RxR_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
"UMask": "0x8",
@@ -6694,8 +7941,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -6703,8 +7952,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x10",
@@ -6712,8 +7963,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x1",
@@ -6721,8 +7974,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x2",
@@ -6730,8 +7985,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -6739,8 +7996,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x40",
@@ -6748,8 +8007,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x4",
@@ -6757,8 +8018,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.IFV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x80",
@@ -6766,8 +8029,10 @@
},
{
"BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xe3",
"EventName": "UNC_M2P_RxR_CRD_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"UMask": "0x8",
@@ -6775,16 +8040,20 @@
},
{
"BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
"EventCode": "0xe4",
"EventName": "UNC_M2P_RxR_CRD_STARVED_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -6792,8 +8061,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -6801,8 +8072,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -6810,8 +8083,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -6819,8 +8094,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -6828,8 +8105,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -6837,8 +8116,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x40",
@@ -6846,8 +8127,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -6855,8 +8138,10 @@
},
{
"BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xe1",
"EventName": "UNC_M2P_RxR_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -6864,8 +8149,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x11",
@@ -6873,8 +8160,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x10",
@@ -6882,8 +8171,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x1",
@@ -6891,8 +8182,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x2",
@@ -6900,8 +8193,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x80",
@@ -6909,8 +8204,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
"UMask": "0x44",
@@ -6918,8 +8215,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x20",
@@ -6927,8 +8226,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x4",
@@ -6936,8 +8237,10 @@
},
{
"BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xe0",
"EventName": "UNC_M2P_RxR_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
"UMask": "0x8",
@@ -6945,8 +8248,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -6954,8 +8259,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -6963,8 +8270,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -6972,8 +8281,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -6981,8 +8292,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -6990,8 +8303,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -6999,8 +8314,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -7008,8 +8325,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -7017,8 +8336,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7026,8 +8347,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7035,8 +8358,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7044,8 +8369,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7053,8 +8380,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7062,8 +8391,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7071,8 +8402,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -7080,8 +8413,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xd2",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -7089,8 +8424,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7098,8 +8435,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7107,8 +8446,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7116,8 +8457,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7125,8 +8468,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7134,8 +8479,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7143,8 +8490,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -7152,8 +8501,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xd4",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -7161,8 +8512,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7170,8 +8523,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7179,8 +8534,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7188,8 +8545,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x8",
@@ -7197,8 +8556,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x10",
@@ -7206,8 +8567,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x20",
@@ -7215,8 +8578,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x40",
@@ -7224,8 +8589,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
"EventCode": "0xd6",
"EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x80",
@@ -7233,8 +8600,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7242,8 +8611,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7251,8 +8622,10 @@
},
{
"BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7260,8 +8633,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7269,8 +8644,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7278,8 +8655,10 @@
},
{
"BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7287,8 +8666,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7296,8 +8677,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7305,8 +8688,10 @@
},
{
"BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xd5",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7314,8 +8699,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x4",
@@ -7323,8 +8710,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x1",
@@ -7332,8 +8721,10 @@
},
{
"BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"EventCode": "0xd7",
"EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
"UMask": "0x2",
@@ -7341,16 +8732,20 @@
},
{
"BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Counter": "0,1",
"EventCode": "0x2d",
"EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "M2PCIe"
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x1",
@@ -7358,8 +8753,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x10",
@@ -7367,8 +8764,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x2",
@@ -7376,8 +8775,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x20",
@@ -7385,8 +8786,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x4",
@@ -7394,8 +8797,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
"UMask": "0x40",
@@ -7403,8 +8808,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.AD_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x1",
@@ -7412,8 +8819,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.AD_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x10",
@@ -7421,8 +8830,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.AK_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x2",
@@ -7430,8 +8841,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.AK_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x20",
@@ -7439,8 +8852,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.BL_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x4",
@@ -7448,8 +8863,10 @@
},
{
"BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
"EventCode": "0x23",
"EventName": "UNC_M2P_TxC_CYCLES_NE.BL_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x40",
@@ -7457,8 +8874,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.AD_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x1",
@@ -7466,8 +8885,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.AD_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x10",
@@ -7475,8 +8896,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x8",
@@ -7484,8 +8907,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x80",
@@ -7493,8 +8918,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.BL_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x4",
@@ -7502,8 +8929,10 @@
},
{
"BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M2P_TxC_INSERTS.BL_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
"UMask": "0x40",
@@ -7511,8 +8940,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7520,8 +8951,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -7529,8 +8962,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -7538,8 +8973,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7547,8 +8984,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -7556,8 +8995,10 @@
},
{
"BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa6",
"EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -7565,8 +9006,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7574,8 +9017,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -7583,8 +9028,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -7592,8 +9039,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -7601,8 +9050,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x80",
@@ -7610,8 +9061,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7619,8 +9072,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -7628,8 +9083,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -7637,8 +9094,10 @@
},
{
"BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa7",
"EventName": "UNC_M2P_TxR_HORZ_BYPASS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -7646,8 +9105,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7655,8 +9116,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -7664,8 +9127,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -7673,8 +9138,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -7682,8 +9149,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -7691,8 +9160,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7700,8 +9171,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -7709,8 +9182,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -7718,8 +9193,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa2",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -7727,8 +9204,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7736,8 +9215,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -7745,8 +9226,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -7754,8 +9237,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -7763,8 +9248,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -7772,8 +9259,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7781,8 +9270,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -7790,8 +9281,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -7799,8 +9292,10 @@
},
{
"BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa3",
"EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -7808,8 +9303,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7817,8 +9314,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -7826,8 +9325,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -7835,8 +9336,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -7844,8 +9347,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -7853,8 +9358,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7862,8 +9369,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -7871,8 +9380,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -7880,8 +9391,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa1",
"EventName": "UNC_M2P_TxR_HORZ_INSERTS.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -7889,8 +9402,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7898,8 +9413,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x10",
@@ -7907,8 +9424,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x1",
@@ -7916,8 +9435,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x2",
@@ -7925,8 +9446,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x80",
@@ -7934,8 +9457,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
"UMask": "0x44",
@@ -7943,8 +9468,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x40",
@@ -7952,8 +9479,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x4",
@@ -7961,8 +9490,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa4",
"EventName": "UNC_M2P_TxR_HORZ_NACK.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
"UMask": "0x8",
@@ -7970,8 +9501,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x11",
@@ -7979,8 +9512,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x10",
@@ -7988,8 +9523,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x1",
@@ -7997,8 +9534,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x2",
@@ -8006,8 +9545,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x80",
@@ -8015,8 +9556,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
"UMask": "0x44",
@@ -8024,8 +9567,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x40",
@@ -8033,8 +9578,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x4",
@@ -8042,8 +9589,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa0",
"EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
"UMask": "0x8",
@@ -8051,8 +9600,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x1",
@@ -8060,8 +9611,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x1",
@@ -8069,8 +9622,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.AK",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x2",
@@ -8078,8 +9633,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.AKC_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x80",
@@ -8087,8 +9644,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
"UMask": "0x4",
@@ -8096,8 +9655,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_UNCRD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x4",
@@ -8105,8 +9666,10 @@
},
{
"BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0xa5",
"EventName": "UNC_M2P_TxR_HORZ_STARVED.IV",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
"UMask": "0x8",
@@ -8114,8 +9677,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9c",
"EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8123,8 +9688,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9c",
"EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8132,8 +9699,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9c",
"EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8141,8 +9710,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9c",
"EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8150,8 +9721,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8159,8 +9732,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x10",
@@ -8168,8 +9743,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8177,8 +9754,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x20",
@@ -8186,8 +9765,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x4",
@@ -8195,8 +9776,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x40",
@@ -8204,8 +9787,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9d",
"EventName": "UNC_M2P_TxR_VERT_BYPASS.IV_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x8",
@@ -8213,8 +9798,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9e",
"EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x1",
@@ -8222,8 +9809,10 @@
},
{
"BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9e",
"EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
"UMask": "0x2",
@@ -8231,8 +9820,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8240,8 +9831,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -8249,8 +9842,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8258,8 +9853,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -8267,8 +9864,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -8276,8 +9875,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -8285,8 +9886,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x94",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -8294,8 +9897,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8303,8 +9908,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x95",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8312,8 +9919,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8321,8 +9930,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -8330,8 +9941,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8339,8 +9952,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -8348,8 +9963,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -8357,8 +9974,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -8366,8 +9985,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x96",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -8375,8 +9996,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8384,8 +10007,10 @@
},
{
"BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x97",
"EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8393,8 +10018,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8402,8 +10029,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -8411,8 +10040,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8420,8 +10051,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -8429,8 +10062,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -8438,8 +10073,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -8447,8 +10084,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x92",
"EventName": "UNC_M2P_TxR_VERT_INSERTS0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -8456,8 +10095,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8465,8 +10106,10 @@
},
{
"BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x93",
"EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8474,8 +10117,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -8483,8 +10128,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x10",
@@ -8492,8 +10139,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -8501,8 +10150,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x20",
@@ -8510,8 +10161,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x4",
@@ -8519,8 +10172,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x40",
@@ -8528,8 +10183,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x98",
"EventName": "UNC_M2P_TxR_VERT_NACK0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x8",
@@ -8537,8 +10194,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x1",
@@ -8546,8 +10205,10 @@
},
{
"BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x99",
"EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
"UMask": "0x2",
@@ -8555,8 +10216,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8564,8 +10227,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
"UMask": "0x10",
@@ -8573,8 +10238,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8582,8 +10249,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
"UMask": "0x20",
@@ -8591,8 +10260,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
"UMask": "0x4",
@@ -8600,8 +10271,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
"UMask": "0x40",
@@ -8609,8 +10282,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x90",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
"UMask": "0x8",
@@ -8618,8 +10293,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
"UMask": "0x1",
@@ -8627,8 +10304,10 @@
},
{
"BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x91",
"EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
"UMask": "0x2",
@@ -8636,8 +10315,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -8645,8 +10326,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x10",
@@ -8654,8 +10337,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -8663,8 +10348,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x20",
@@ -8672,8 +10359,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -8681,8 +10370,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x40",
@@ -8690,8 +10381,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
"EventCode": "0x9a",
"EventName": "UNC_M2P_TxR_VERT_STARVED0.IV_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x8",
@@ -8699,8 +10392,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9b",
"EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x1",
@@ -8708,8 +10403,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
"EventCode": "0x9b",
"EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x2",
@@ -8717,8 +10414,10 @@
},
{
"BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x9b",
"EventName": "UNC_M2P_TxR_VERT_STARVED1.TGC",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
"UMask": "0x4",
@@ -8726,8 +10425,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -8735,8 +10436,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -8744,8 +10447,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -8753,8 +10458,10 @@
},
{
"BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -8762,8 +10469,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb4",
"EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -8771,8 +10480,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb4",
"EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -8780,8 +10491,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb4",
"EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -8789,8 +10502,10 @@
},
{
"BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb4",
"EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -8798,8 +10513,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -8807,8 +10524,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -8816,8 +10535,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -8825,8 +10546,10 @@
},
{
"BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb1",
"EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -8834,8 +10557,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -8843,8 +10568,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -8852,8 +10579,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -8861,8 +10590,10 @@
},
{
"BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb2",
"EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
@@ -8870,8 +10601,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
"EventCode": "0xb3",
"EventName": "UNC_M2P_VERT_RING_IV_IN_USE.DN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x4",
@@ -8879,8 +10612,10 @@
},
{
"BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
"EventCode": "0xb3",
"EventName": "UNC_M2P_VERT_RING_IV_IN_USE.UP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
"UMask": "0x1",
@@ -8888,8 +10623,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb5",
"EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x4",
@@ -8897,8 +10634,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb5",
"EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x8",
@@ -8906,8 +10645,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
"EventCode": "0xb5",
"EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x1",
@@ -8915,8 +10656,10 @@
},
{
"BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
"EventCode": "0xb5",
"EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_ODD",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json
index b80911d498dd..2f6907cba7f6 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
@@ -11,6 +12,7 @@
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
@@ -21,6 +23,7 @@
},
{
"BriefDescription": "DRAM Activate Count : All Activates",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_ACT_COUNT.ALL",
"PerPkg": "1",
@@ -30,8 +33,10 @@
},
{
"BriefDescription": "DRAM Activate Count : Activate due to Bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x01",
"EventName": "UNC_M_ACT_COUNT.BYP",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Activate Count : Activate due to Bypass : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x8",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "All DRAM CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
@@ -48,6 +54,7 @@
},
{
"BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
@@ -57,8 +64,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre : DRAM RD_CAS and WR_CAS Commands : Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with explicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
"UMask": "0x2",
@@ -66,8 +75,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x8",
@@ -75,8 +86,10 @@
},
{
"BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total number of DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
"UMask": "0x1",
@@ -84,8 +97,10 @@
},
{
"BriefDescription": "DRAM underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Counts the total of DRAM Read CAS commands issued due to an underfill",
"UMask": "0x4",
@@ -93,6 +108,7 @@
},
{
"BriefDescription": "All DRAM write CAS commands issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
@@ -102,8 +118,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x10",
@@ -111,8 +129,10 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre : DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x20",
@@ -120,6 +140,7 @@
},
{
"BriefDescription": "Memory controller clock ticks",
+ "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Clockticks of the integrated memory controller (IMC)",
@@ -127,22 +148,27 @@
},
{
"BriefDescription": "Free running counter that increments for the Memory Controller",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_M_CLOCKTICKS_FREERUN",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "imc_free_running"
},
{
"BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
"EventCode": "0x44",
"EventName": "UNC_M_DRAM_PRE_ALL",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge All Commands : Counts the number of times that the precharge all command was sent.",
"Unit": "iMC"
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
@@ -152,6 +178,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M_DRAM_REFRESH.OPPORTUNISTIC",
"PerPkg": "1",
@@ -161,6 +188,7 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
"EventCode": "0x45",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
@@ -170,6 +198,7 @@
},
{
"BriefDescription": "Half clockticks for IMC",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_M_HCLOCKTICKS",
"PerPkg": "1",
@@ -177,39 +206,49 @@
},
{
"BriefDescription": "UNC_M_PARITY_ERRORS",
+ "Counter": "0,1,2,3",
"EventCode": "0x2c",
"EventName": "UNC_M_PARITY_ERRORS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.RD",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_PCLS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.TOTAL",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_PCLS.TOTAL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "UNC_M_PCLS.WR",
+ "Counter": "0,1,2,3",
"EventCode": "0xA0",
"EventName": "UNC_M_PCLS.WR",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "Experimental": "1",
"MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100",
"MetricName": "power_channel_ppd",
"PerPkg": "1",
@@ -218,8 +257,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x1",
@@ -227,8 +268,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x2",
@@ -236,8 +279,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x4",
@@ -245,8 +290,10 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
"UMask": "0x8",
@@ -254,8 +301,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
"UMask": "0x1",
@@ -263,8 +312,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
@@ -272,8 +323,10 @@
},
{
"BriefDescription": "Cycles Memory is in self refresh power mode",
+ "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
+ "Experimental": "1",
"MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100",
"MetricName": "power_self_refresh",
"PerPkg": "1",
@@ -282,8 +335,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
"UMask": "0x1",
@@ -291,8 +346,10 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x46",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
"UMask": "0x2",
@@ -300,6 +357,7 @@
},
{
"BriefDescription": "DRAM Precharge commands.",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.ALL",
"PerPkg": "1",
@@ -309,8 +367,10 @@
},
{
"BriefDescription": "Pre-charges due to page misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "DRAM Precharge commands. : Precharge due to page miss : Counts the number of DRAM Precharge commands sent on this channel. : Pages Misses are due to precharges from bank scheduler (rd/wr requests)",
"UMask": "0xc",
@@ -318,6 +378,7 @@
},
{
"BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.PGT",
"PerPkg": "1",
@@ -327,6 +388,7 @@
},
{
"BriefDescription": "Pre-charge for reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
@@ -336,6 +398,7 @@
},
{
"BriefDescription": "Pre-charge for writes",
+ "Counter": "0,1,2,3",
"EventCode": "0x02",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
@@ -345,52 +408,66 @@
},
{
"BriefDescription": "Read Data Buffer Full",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_M_RDB_FULL",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_M_RDB_INSERTS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_M_RDB_NOT_EMPTY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Data Buffer Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_M_RDB_OCCUPANCY",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M_RPQ_CYCLES_FULL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_M_RPQ_CYCLES_FULL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
"UMask": "0x1",
@@ -398,8 +475,10 @@
},
{
"BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Read Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
"UMask": "0x2",
@@ -407,6 +486,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH0",
"PerPkg": "1",
@@ -416,6 +496,7 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS.PCH1",
"PerPkg": "1",
@@ -425,6 +506,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
"PerPkg": "1",
@@ -433,6 +515,7 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
"PerPkg": "1",
@@ -441,24 +524,30 @@
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL_PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Full Cycles : Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_M_WPQ_CYCLES_FULL_PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Full Cycles : Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
"Unit": "iMC"
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Not Empty : Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
"UMask": "0x1",
@@ -466,8 +555,10 @@
},
{
"BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue Not Empty : Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
"UMask": "0x2",
@@ -475,6 +566,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS.PCH0",
"PerPkg": "1",
@@ -484,6 +576,7 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS.PCH1",
"PerPkg": "1",
@@ -493,6 +586,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
"PerPkg": "1",
@@ -501,6 +595,7 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
"PerPkg": "1",
@@ -509,8 +604,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x1",
@@ -518,8 +615,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x2",
@@ -527,8 +626,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT.PCH0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x1",
@@ -536,8 +637,10 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT.PCH1",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json
index dcf268467db9..1d59c9b65b3f 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json
@@ -1,153 +1,192 @@
[
{
"BriefDescription": "Clockticks of the power control unit (PCU)",
+ "Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "UNC_P_DEMOTIONS",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x75",
"EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 0 Cycles : Cycles spent in phase-shedding power state 0",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 1 Cycles : Cycles spent in phase-shedding power state 1",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x77",
"EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 2 Cycles : Cycles spent in phase-shedding power state 2",
"Unit": "PCU"
},
{
"BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x78",
"EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Phase Shed 3 Cycles : Cycles spent in phase-shedding power state 3",
"Unit": "PCU"
},
{
"BriefDescription": "AVX256 Frequency Clipping",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "AVX512 Frequency Clipping",
+ "Counter": "0,1,2,3",
"EventCode": "0x4a",
"EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x04",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.",
"Unit": "PCU"
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x05",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "IO P Limit Strongest Lower Limit Cycles : Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x2F",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Memory Phase Shedding Cycles : Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C0 : Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C3 : Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
"EventCode": "0x2D",
"EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
"Unit": "PCU"
},
{
"BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0,1,2,3",
"EventCode": "0x06",
"EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"Unit": "PCU"
},
{
"BriefDescription": "Number of cores in C-State : C0 and C1",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C-State : C0 and C1 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0x40",
@@ -155,8 +194,10 @@
},
{
"BriefDescription": "Number of cores in C-State : C3",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C-State : C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0x80",
@@ -164,8 +205,10 @@
},
{
"BriefDescription": "Number of cores in C-State : C6 and C7",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C-State : C6 and C7 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"UMask": "0xc0",
@@ -173,32 +216,40 @@
},
{
"BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x0A",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
"EventCode": "0x09",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
"BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x72",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Total Core C State Transition Cycles : Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
"BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
"EventCode": "0x42",
"EventName": "UNC_P_VR_HOT_CYCLES",
+ "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
"Unit": "PCU"
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
index cabe29e70e79..f9a6caed8776 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of page walks due to loads that miss the PDE (Page Directory Entry) cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
"SampleAfterValue": "200003",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Account for all page sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "200003",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -23,6 +26,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1GB pages. Includes page walks that page fault.",
@@ -31,6 +35,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -47,6 +53,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -55,6 +62,7 @@
},
{
"BriefDescription": "Counts the number of page walks due to stores that miss the PDE (Page Directory Entry) cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
"SampleAfterValue": "2000003",
@@ -62,6 +70,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Account for all pages sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -69,6 +78,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -77,6 +87,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
@@ -85,6 +96,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -93,6 +105,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -101,6 +114,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
@@ -109,6 +123,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Directory Entry hits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDE_HIT",
"PublicDescription": "Counts the number of Extended Page Directory Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -117,6 +132,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Directory Entry misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDE_MISS",
"PublicDescription": "Counts the number Extended Page Directory Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -125,6 +141,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Directory Pointer Entry hits.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDPE_HIT",
"PublicDescription": "Counts the number Extended Page Directory Pointer Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -133,6 +150,7 @@
},
{
"BriefDescription": "Counts the number of Extended Page Directory Pointer Entry misses.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDPE_MISS",
"PublicDescription": "Counts the number Extended Page Directory Pointer Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -141,6 +159,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
@@ -149,6 +168,7 @@
},
{
"BriefDescription": "Counts the number of times there was an ITLB miss and a new translation was filled into the ITLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "ITLB.FILLS",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) and a new translation was filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
@@ -157,6 +177,7 @@
},
{
"BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.PDE_CACHE_MISS",
"SampleAfterValue": "2000003",
@@ -164,6 +185,7 @@
},
{
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
@@ -171,6 +193,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
@@ -179,6 +202,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 1G page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
@@ -187,6 +211,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
@@ -195,6 +220,7 @@
},
{
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
@@ -203,6 +229,7 @@
},
{
"BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk).",
@@ -211,6 +238,7 @@
},
{
"BriefDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DTLB_MISS",
"PEBS": "1",
@@ -219,6 +247,7 @@
},
{
"BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
@@ -228,6 +257,7 @@
},
{
"BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
@@ -237,6 +267,7 @@
},
{
"BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/cache.json b/tools/perf/pmu-events/arch/x86/tigerlake/cache.json
index c54fb65d3259..f4144a1110be 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "L1D.REPLACEMENT",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.FB_FULL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x48",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.L2_STALL",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "0,1,2,3",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
@@ -43,6 +48,7 @@
},
{
"BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x48",
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
@@ -52,6 +58,7 @@
},
{
"BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
"EventCode": "0xf1",
"EventName": "L2_LINES_IN.ALL",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
@@ -60,6 +67,7 @@
},
{
"BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0xf2",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
@@ -68,6 +76,7 @@
},
{
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3",
"EventCode": "0xf2",
"EventName": "L2_LINES_OUT.SILENT",
"PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
@@ -76,6 +85,7 @@
},
{
"BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PublicDescription": "Counts the total number of L2 code requests.",
@@ -84,6 +94,7 @@
},
{
"BriefDescription": "Demand Data Read access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -92,6 +103,7 @@
},
{
"BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
@@ -100,6 +112,7 @@
},
{
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
@@ -108,6 +121,7 @@
},
{
"BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
@@ -116,6 +130,7 @@
},
{
"BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "Demand Data Read miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Read requests with true-miss in L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "All accesses to L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
@@ -164,6 +184,7 @@
},
{
"BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_HIT",
"PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -172,6 +193,7 @@
},
{
"BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.SWPF_MISS",
"PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
@@ -180,6 +202,7 @@
},
{
"BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xf0",
"EventName": "L2_TRANS.L2_WB",
"PublicDescription": "Counts L2 writebacks that access L2 cache.",
@@ -188,6 +211,7 @@
},
{
"BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
@@ -196,6 +220,7 @@
},
{
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
@@ -204,6 +229,7 @@
},
{
"BriefDescription": "Retired load instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
@@ -214,6 +240,7 @@
},
{
"BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
@@ -224,6 +251,7 @@
},
{
"BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
@@ -234,6 +262,7 @@
},
{
"BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
@@ -244,6 +273,7 @@
},
{
"BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
@@ -254,6 +284,7 @@
},
{
"BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
@@ -264,6 +295,7 @@
},
{
"BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
@@ -274,6 +306,7 @@
},
{
"BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
@@ -284,6 +317,7 @@
},
{
"BriefDescription": "Snoop hit a modified(HITM) or clean line(HIT_W_FWD) in another on-pkg core which forwarded the data back due to a retired load instruction.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
@@ -294,6 +328,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
@@ -304,6 +339,7 @@
},
{
"BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
@@ -314,6 +350,7 @@
},
{
"BriefDescription": "Snoop hit without forwarding in another on-pkg core due to a retired load instruction, data was supplied by the L3.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
@@ -324,6 +361,7 @@
},
{
"BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
@@ -334,6 +372,7 @@
},
{
"BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
@@ -344,6 +383,7 @@
},
{
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
@@ -354,6 +394,7 @@
},
{
"BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
@@ -364,6 +405,7 @@
},
{
"BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
@@ -374,6 +416,7 @@
},
{
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
@@ -384,6 +427,7 @@
},
{
"BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
@@ -394,6 +438,7 @@
},
{
"BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
@@ -404,6 +449,7 @@
},
{
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -413,6 +459,7 @@
},
{
"BriefDescription": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -422,6 +469,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -431,6 +479,7 @@
},
{
"BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
@@ -439,6 +488,7 @@
},
{
"BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
@@ -447,6 +497,7 @@
},
{
"BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
@@ -455,6 +506,7 @@
},
{
"BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
@@ -463,6 +515,7 @@
},
{
"BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
@@ -471,6 +524,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
@@ -480,6 +534,7 @@
},
{
"BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
@@ -489,6 +544,7 @@
},
{
"BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
@@ -498,6 +554,7 @@
},
{
"BriefDescription": "Demand Data Read transactions pending for off-core. Highly correlated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PublicDescription": "Counts the number of off-core outstanding Demand Data Read transactions every cycle. A transaction is considered to be in the Off-core outstanding state between L2 cache miss and data-return to the core.",
@@ -506,6 +563,7 @@
},
{
"BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
@@ -514,6 +572,7 @@
},
{
"BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
@@ -522,6 +581,7 @@
},
{
"BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.BUS_LOCK",
"PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
@@ -530,6 +590,7 @@
},
{
"BriefDescription": "Cycles the superQ cannot take any more entries.",
+ "Counter": "0,1,2,3",
"EventCode": "0xf4",
"EventName": "SQ_MISC.SQ_FULL",
"PublicDescription": "Counts the cycles for which the thread is active and the superQ cannot take any more entries.",
@@ -537,7 +598,16 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf"
+ },
+ {
"BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
@@ -546,6 +616,7 @@
},
{
"BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
@@ -554,6 +625,7 @@
},
{
"BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T0",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
@@ -562,6 +634,7 @@
},
{
"BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/counter.json b/tools/perf/pmu-events/arch/x86/tigerlake/counter.json
new file mode 100644
index 000000000000..5a350072522a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/counter.json
@@ -0,0 +1,17 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "ARB",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
+ "Unit": "CLOCK",
+ "CountersNumFixed": 1,
+ "CountersNumGeneric": "0"
+ }
+]
\ No newline at end of file
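The new counter.json describes, per PMU unit, how many fixed and general-purpose counters Tiger Lake exposes (8 generic plus 4 fixed core counters, a 2-counter ARB unit, and a single fixed CLOCK counter). A minimal consistency check -- purely illustrative, with hypothetical local file paths -- could verify that no event's "Counter" list names a generic counter the core unit does not have:

import json

def core_generic_counters(counter_path):
    with open(counter_path) as f:
        units = json.load(f)
    core = next(u for u in units if u["Unit"] == "core")
    return int(core["CountersNumGeneric"])

def check_events(events_path, num_generic):
    with open(events_path) as f:
        events = json.load(f)
    for ev in events:
        field = ev.get("Counter", "")
        if not field or field.startswith("Fixed"):
            continue
        used = [int(t) for t in field.split(",") if t.strip().isdigit()]
        bad = [c for c in used if c >= num_generic]
        if bad:
            print(f"{ev['EventName']}: counters {bad} exceed the {num_generic} generic counters")

if __name__ == "__main__":
    n = core_generic_counters("counter.json")
    check_events("cache.json", n)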
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json b/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json
index 63b5b56d1ed0..0b04972d0b17 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.FP",
"PublicDescription": "Counts all microcode Floating Point assists.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -49,6 +55,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -57,6 +64,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -65,6 +73,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
"PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -73,6 +82,7 @@
},
{
"BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -81,6 +91,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -89,6 +100,7 @@
},
{
"BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
@@ -97,6 +109,7 @@
},
{
"BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.VECTOR",
"SampleAfterValue": "1000003",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json b/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
index d7b972452c0e..13c052d0f470 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "DECODE.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xab",
@@ -27,6 +30,7 @@
},
{
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
"EventCode": "0xab",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
@@ -57,6 +63,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
@@ -68,6 +75,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
@@ -79,6 +87,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
@@ -90,6 +99,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
@@ -101,6 +111,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
@@ -112,6 +123,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
@@ -123,6 +135,7 @@
},
{
"BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
@@ -134,6 +147,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
@@ -145,6 +159,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
@@ -156,6 +171,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
@@ -167,6 +183,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
@@ -178,6 +195,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
@@ -189,6 +207,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
@@ -200,6 +219,7 @@
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
@@ -211,6 +231,7 @@
},
{
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
@@ -222,6 +243,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_DATA.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_DATA.STALLS]",
@@ -230,6 +252,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_HIT",
"PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
@@ -238,6 +261,7 @@
},
{
"BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_MISS",
"PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
@@ -246,6 +270,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
@@ -254,6 +279,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE_DATA.STALLS",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_16B.IFDATA_STALL]",
@@ -262,6 +288,7 @@
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "ICACHE_TAG.STALLS",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
@@ -270,6 +297,7 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_ANY",
@@ -279,15 +307,17 @@
},
{
"BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES_OK",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
@@ -296,6 +326,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_ANY",
@@ -305,6 +336,7 @@
},
{
"BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES_OK",
@@ -314,6 +346,7 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
@@ -322,6 +355,7 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES_ANY",
@@ -331,6 +365,7 @@
},
{
"BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -341,6 +376,7 @@
},
{
"BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
@@ -349,6 +385,7 @@
},
{
"BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
@@ -357,6 +394,7 @@
},
{
"BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -366,6 +404,7 @@
},
{
"BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x9c",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/memory.json b/tools/perf/pmu-events/arch/x86/tigerlake/memory.json
index 8848fcbcc35c..a125cefa100f 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "6",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
@@ -29,6 +32,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
@@ -41,6 +45,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
@@ -53,6 +58,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
@@ -65,6 +71,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
@@ -77,6 +84,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
@@ -89,6 +97,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
@@ -101,6 +110,7 @@
},
{
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
@@ -113,6 +123,7 @@
},
{
"BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xb0",
"EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"PublicDescription": "Demand Data Read requests who miss L3 cache.",
@@ -121,14 +132,17 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
"PublicDescription": "Counts the number of times RTM abort was triggered.",
"SampleAfterValue": "100003",
"UMask": "0x4"
},
{
"BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
@@ -137,6 +151,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
@@ -145,6 +160,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
@@ -153,6 +169,7 @@
},
{
"BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
"PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
@@ -161,6 +178,7 @@
},
{
"BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.COMMIT",
"PublicDescription": "Counts the number of times RTM commit succeeded.",
@@ -169,6 +187,7 @@
},
{
"BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.START",
"PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
@@ -177,6 +196,7 @@
},
{
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC2",
"PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
@@ -185,6 +205,7 @@
},
{
"BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5d",
"EventName": "TX_EXEC.MISC3",
"PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
@@ -193,6 +214,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_READ",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
@@ -201,6 +223,7 @@
},
{
"BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
@@ -209,6 +232,7 @@
},
{
"BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
"EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
"PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json b/tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json
index 5452a1448ded..3a88260194d1 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/metricgroups.json
@@ -5,7 +5,20 @@
"BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/other.json b/tools/perf/pmu-events/arch/x86/tigerlake/other.json
index 117b18abcaaf..a22b626c14c9 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
"PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
"PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
index 4f85d53edec2..09b53b0722a9 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x14",
"EventName": "ARITH.DIVIDER_ACTIVE",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.ANY",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"PEBS": "1",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"PEBS": "1",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"PEBS": "1",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -106,6 +118,7 @@
},
{
"BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
"PEBS": "1",
@@ -115,6 +128,7 @@
},
{
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
"PEBS": "1",
@@ -124,6 +138,7 @@
},
{
"BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
"PEBS": "1",
@@ -133,6 +148,7 @@
},
{
"BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
"PEBS": "1",
@@ -142,6 +158,7 @@
},
{
"BriefDescription": "Mispredicted indirect CALL instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
"PEBS": "1",
@@ -151,6 +168,7 @@
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -160,6 +178,7 @@
},
{
"BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
"PEBS": "1",
@@ -169,6 +188,7 @@
},
{
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -177,6 +197,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
@@ -185,6 +206,7 @@
},
{
"BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
@@ -193,6 +215,7 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
@@ -200,6 +223,7 @@
},
{
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
@@ -208,6 +232,7 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -215,6 +240,7 @@
},
{
"BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
@@ -222,6 +248,7 @@
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "8",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
@@ -230,6 +257,7 @@
},
{
"BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
@@ -238,6 +266,7 @@
},
{
"BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "16",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
@@ -246,6 +275,7 @@
},
{
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "12",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
@@ -254,6 +284,7 @@
},
{
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
@@ -262,6 +293,7 @@
},
{
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "20",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
@@ -270,6 +302,7 @@
},
{
"BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xa3",
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
@@ -278,6 +311,7 @@
},
{
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
@@ -286,6 +320,7 @@
},
{
"BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
@@ -294,6 +329,7 @@
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -302,6 +338,7 @@
},
{
"BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -310,6 +347,7 @@
},
{
"BriefDescription": "Cycles when the memory subsystem has an outstanding load. Increments by 4 for every such cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "5",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
@@ -319,6 +357,7 @@
},
{
"BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
@@ -328,6 +367,7 @@
},
{
"BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
@@ -336,6 +376,7 @@
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
@@ -344,6 +385,7 @@
},
{
"BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
"EventCode": "0x55",
"EventName": "INST_DECODED.DECODERS",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
@@ -352,6 +394,7 @@
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
@@ -360,6 +403,7 @@
},
{
"BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -368,6 +412,7 @@
},
{
"BriefDescription": "Retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBS": "1",
@@ -377,6 +422,7 @@
},
{
"BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
@@ -385,6 +431,7 @@
},
{
"BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x0d",
"EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
@@ -394,6 +441,7 @@
},
{
"BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x0d",
@@ -404,6 +452,7 @@
},
{
"BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
@@ -412,6 +461,7 @@
},
{
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
@@ -420,6 +470,7 @@
},
{
"BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0d",
"EventName": "INT_MISC.UOP_DROPPING",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
@@ -428,6 +479,7 @@
},
{
"BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.NO_SR",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
@@ -436,6 +488,7 @@
},
{
"BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
@@ -444,6 +497,7 @@
},
{
"BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
"EventCode": "0x07",
"EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
@@ -452,6 +506,7 @@
},
{
"BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -460,6 +515,7 @@
},
{
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -469,6 +525,7 @@
},
{
"BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3",
"CounterMask": "5",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
@@ -478,6 +535,7 @@
},
{
"BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
"EventCode": "0xa8",
"EventName": "LSD.UOPS",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
@@ -486,6 +544,7 @@
},
{
"BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xc3",
@@ -496,6 +555,7 @@
},
{
"BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
@@ -504,6 +564,7 @@
},
{
"BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.LBR_INSERTS",
"PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
@@ -512,6 +573,7 @@
},
{
"BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xcc",
"EventName": "MISC_RETIRED.PAUSE_INST",
"PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
@@ -520,6 +582,7 @@
},
{
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
@@ -528,6 +591,7 @@
},
{
"BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"SampleAfterValue": "100003",
@@ -535,6 +599,7 @@
},
{
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x5e",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
@@ -543,6 +608,7 @@
},
{
"BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5e",
@@ -554,6 +620,7 @@
},
{
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
@@ -562,6 +629,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
@@ -569,6 +637,7 @@
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.SLOTS_P",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
@@ -577,6 +646,7 @@
},
{
"BriefDescription": "Number of uops decoded out of instructions exclusively fetched by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x56",
"EventName": "UOPS_DECODED.DEC0",
"PublicDescription": "Uops exclusively fetched by decoder 0",
@@ -585,6 +655,7 @@
},
{
"BriefDescription": "Number of uops executed on port 0",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_0",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
@@ -593,6 +664,7 @@
},
{
"BriefDescription": "Number of uops executed on port 1",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_1",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
@@ -601,6 +673,7 @@
},
{
"BriefDescription": "Number of uops executed on port 2 and 3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_2_3",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
@@ -609,6 +682,7 @@
},
{
"BriefDescription": "Number of uops executed on port 4 and 9",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
@@ -617,6 +691,7 @@
},
{
"BriefDescription": "Number of uops executed on port 5",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_5",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
@@ -625,6 +700,7 @@
},
{
"BriefDescription": "Number of uops executed on port 6",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_6",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
@@ -633,6 +709,7 @@
},
{
"BriefDescription": "Number of uops executed on port 7 and 8",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa1",
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
@@ -641,6 +718,7 @@
},
{
"BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Counts the number of uops executed from any thread.",
@@ -649,6 +727,7 @@
},
{
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
@@ -658,6 +737,7 @@
},
{
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -667,6 +747,7 @@
},
{
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
@@ -676,6 +757,7 @@
},
{
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
@@ -685,6 +767,7 @@
},
{
"BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
@@ -694,6 +777,7 @@
},
{
"BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
@@ -703,6 +787,7 @@
},
{
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
@@ -712,6 +797,7 @@
},
{
"BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
@@ -721,6 +807,7 @@
},
{
"BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.STALL_CYCLES",
@@ -731,6 +818,7 @@
},
{
"BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
@@ -738,6 +826,7 @@
},
{
"BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.X87",
"PublicDescription": "Counts the number of x87 uops executed.",
@@ -746,6 +835,7 @@
},
{
"BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
@@ -754,6 +844,7 @@
},
{
"BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -764,6 +855,7 @@
},
{
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
@@ -772,6 +864,7 @@
},
{
"BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
"PublicDescription": "Counts the retirement slots used each cycle.",
@@ -780,6 +873,7 @@
},
{
"BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -790,6 +884,7 @@
},
{
"BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "10",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
index 8ae4f2474b25..c45c6b4a380d 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
@@ -104,7 +104,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
"MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots",
- "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
"MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
@@ -114,7 +114,7 @@
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * INT_MISC.CLEARS_COUNT / tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -135,7 +135,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
"MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Branches;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_branch_instructions",
"MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6",
"ScaleUnit": "100%"
@@ -143,7 +143,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
- "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -181,7 +181,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(49 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 48 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -201,7 +201,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "48 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
- "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
"MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -219,7 +219,7 @@
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
- "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
@@ -250,13 +250,13 @@
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
"MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -265,7 +265,7 @@
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
"MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
- "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
"MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
@@ -274,7 +274,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "54 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
- "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -283,7 +283,7 @@
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -296,7 +296,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -338,7 +338,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
"MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -347,7 +347,7 @@
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
"MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
@@ -385,7 +385,7 @@
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_thread_slots",
- "MetricGroup": "Default;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -405,7 +405,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
@@ -469,13 +469,21 @@
"MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
},
{
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
- "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
@@ -487,39 +495,33 @@
"PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the baseline operations not covered by Branching_Overhead nor Irregular_Overhead.",
- "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Ret",
- "MetricName": "tma_info_bottleneck_base_non_br",
- "MetricThreshold": "tma_info_bottleneck_base_non_br > 20"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
- "MetricGroup": "BigFootprint;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_info_bottleneck_big_code",
"MetricThreshold": "tma_info_bottleneck_big_code > 20"
},
{
- "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
- "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + BR_INST_RETIRED.NEAR_CALL) / tma_info_thread_slots)",
- "MetricGroup": "Ret",
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
"MetricName": "tma_info_bottleneck_branching_overhead",
- "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5"
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
- "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
"MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_cache_memory_latency",
"MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
@@ -527,23 +529,23 @@
{
"BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
"MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "Cor;tma_issueComp",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
"MetricName": "tma_info_bottleneck_compute_bound_est",
"MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
"PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
},
{
- "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
- "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
"MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
- "MetricGroup": "Bad;Cor;Ret;tma_issueMS",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_info_bottleneck_irregular_overhead",
"MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
"PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches"
@@ -551,8 +553,8 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
- "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization"
@@ -560,7 +562,7 @@
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
"MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
- "MetricGroup": "Mem;Offcore;tma_issueTLB",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_synchronization",
"MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs"
@@ -569,18 +571,25 @@
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
- "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
"MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
"PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class)",
- "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_base_non_br)",
- "MetricGroup": "Cor;Offcore",
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_info_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks (apart from those listed in the Info.Bottlenecks metrics class). Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20"
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -638,7 +647,7 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
"PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
@@ -655,7 +664,7 @@
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 5 > 0.35",
- "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
@@ -721,7 +730,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -816,12 +825,12 @@
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 11",
- "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
"BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
@@ -854,7 +863,7 @@
"MetricName": "tma_info_memory_fb_hpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
@@ -872,7 +881,7 @@
"MetricName": "tma_info_memory_l1mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
@@ -908,13 +917,19 @@
"MetricName": "tma_info_memory_l2mpki_load"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * L2_RQSTS.RFO_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW;Offcore",
"MetricName": "tma_info_memory_l3_cache_access_bw"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
@@ -1000,12 +1015,30 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "IDQ.DSB_UOPS / IDQ.DSB_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from LSD per cycle",
+ "MetricExpr": "LSD.UOPS / LSD.CYCLES_ACTIVE",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_lsd"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "IDQ.MITE_UOPS / IDQ.MITE_CYCLES_ANY",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
@@ -1027,13 +1060,13 @@
},
{
"BriefDescription": "Average CPU Utilization (percentage)",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "#num_cpus_online * tma_info_system_cpu_utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1171,7 +1204,7 @@
"MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
- "BriefDescription": "Instruction per taken branch",
+ "BriefDescription": "Uops per taken branch",
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
@@ -1180,7 +1213,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
@@ -1196,10 +1229,19 @@
"ScaleUnit": "100%"
},
{
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
- "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
"MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
@@ -1218,7 +1260,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
"MetricExpr": "17.5 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
"MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
@@ -1230,7 +1272,7 @@
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
@@ -1275,7 +1317,7 @@
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
@@ -1290,7 +1332,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
"MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
- "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
@@ -1300,7 +1342,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
@@ -1309,7 +1351,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
"MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
@@ -1346,7 +1388,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
- "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
"MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
@@ -1390,7 +1432,7 @@
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
- "MetricGroup": "Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
"MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
@@ -1409,7 +1451,7 @@
{
"BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
- "MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
"MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1417,7 +1459,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
- "MetricGroup": "Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
"MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
@@ -1469,7 +1511,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_core_bound * RS_EVENTS.EMPTY_CYCLES) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
+ "MetricExpr": "EXE_ACTIVITY.EXE_BOUND_0_PORTS / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -1497,7 +1539,7 @@
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
@@ -1507,7 +1549,7 @@
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
"MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
- "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
@@ -1517,7 +1559,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
- "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
@@ -1554,7 +1596,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
- "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1582,7 +1624,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
@@ -1625,7 +1667,7 @@
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
"MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
- "MetricGroup": "BigFootprint;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json
index 48f23acc76c0..1500bf109c99 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "Counter": "0,1",
"EventCode": "0x84",
"EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -9,56 +10,69 @@
},
{
"BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.",
+ "Counter": "0",
"EventCode": "0x85",
"EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Counter": "0,1",
"Deprecated": "1",
"EventCode": "0x81",
"EventName": "UNC_ARB_DAT_REQUESTS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_DAT_OCCUPANCY.ALL",
+ "Counter": "0",
"Deprecated": "1",
"EventCode": "0x85",
"EventName": "UNC_ARB_IFA_OCCUPANCY.ALL",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_OCCUPANCY.RD]",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_REQUESTS.RD]",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "Each cycle count number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from it's allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
@@ -67,14 +81,17 @@
},
{
"BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_OCCUPANCY.DRD]",
+ "Counter": "0",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
},
{
"BriefDescription": "UNC_ARB_TRK_REQUESTS.ALL",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.ALL",
"PerPkg": "1",
@@ -83,8 +100,10 @@
},
{
"BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_REQUEST.DRD]",
+ "Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json
index 99fb5259fd25..ea213045cbca 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Counts every read (RdCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "Counter": "1",
"EventCode": "0xff",
"EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Counts every 64B read and write request entering the Memory Controller to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "Counter": "0",
"EventCode": "0xff",
"EventName": "UNC_MC0_TOTAL_REQCOUNT_FREERUN",
"PerPkg": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Counts every write (WrCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "Counter": "2",
"EventCode": "0xff",
"EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "Counts every read (RdCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "Counter": "4",
"EventCode": "0xff",
"EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
"PerPkg": "1",
@@ -33,6 +37,7 @@
},
{
"BriefDescription": "Counts every 64B read and write request entering the Memory Controller to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "Counter": "3",
"EventCode": "0xff",
"EventName": "UNC_MC1_TOTAL_REQCOUNT_FREERUN",
"PerPkg": "1",
@@ -41,6 +46,7 @@
},
{
"BriefDescription": "Counts every write (WrCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "Counter": "5",
"EventCode": "0xff",
"EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json
index c6596ba09195..cc8110ac020c 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "UNC_CLOCK.SOCKET",
+ "Counter": "FIXED",
"EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json
index adb2f6b3e77c..62dc0fc76748 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -34,6 +38,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -42,6 +47,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
@@ -50,6 +56,7 @@
},
{
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
@@ -58,6 +65,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -67,6 +75,7 @@
},
{
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
@@ -75,6 +84,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
@@ -83,6 +93,7 @@
},
{
"BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
@@ -91,6 +102,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
@@ -99,6 +111,7 @@
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
@@ -107,6 +120,7 @@
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_ACTIVE",
@@ -116,6 +130,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -124,6 +139,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -132,6 +148,7 @@
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
@@ -140,6 +157,7 @@
},
{
"BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
@@ -148,6 +166,7 @@
},
{
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
"EventCode": "0xbd",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
@@ -156,6 +175,7 @@
},
{
"BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
"EventCode": "0xbd",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
index 4dae735fb636..30845c7dbf08 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles L1D locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles L1D and L2 locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D_L2",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1D cache lines replaced in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_EVICT",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1D cache lines allocated in the M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_REPL",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1D snoop eviction of cache lines in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_SNOOP_EVICT",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1 data cache lines allocated",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.REPL",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "L1D prefetch load lock accepted in fill buffer",
+ "Counter": "0,1",
"EventCode": "0x52",
"EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "L1D hardware prefetch misses",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.MISS",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.REQUESTS",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests triggered",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.TRIGGERS",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.E_STATE",
"SampleAfterValue": "100000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.I_STATE",
"SampleAfterValue": "100000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "All L1 writebacks to L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.MESI",
"SampleAfterValue": "100000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.M_STATE",
"SampleAfterValue": "100000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.S_STATE",
"SampleAfterValue": "100000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "All L2 data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.ANY",
"SampleAfterValue": "200000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "L2 data demand loads in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
"SampleAfterValue": "200000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "L2 data demand loads in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
"SampleAfterValue": "200000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "L2 data demand requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.MESI",
"SampleAfterValue": "200000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "L2 data demand loads in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
"SampleAfterValue": "200000",
@@ -141,6 +161,7 @@
},
{
"BriefDescription": "L2 data demand loads in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
"SampleAfterValue": "200000",
@@ -148,6 +169,7 @@
},
{
"BriefDescription": "L2 data prefetches in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
@@ -155,6 +177,7 @@
},
{
"BriefDescription": "L2 data prefetches in the I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
@@ -162,6 +185,7 @@
},
{
"BriefDescription": "All L2 data prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
"SampleAfterValue": "200000",
@@ -169,6 +193,7 @@
},
{
"BriefDescription": "L2 data prefetches in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
@@ -176,6 +201,7 @@
},
{
"BriefDescription": "L2 data prefetches in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
@@ -183,6 +209,7 @@
},
{
"BriefDescription": "L2 lines allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
@@ -190,6 +217,7 @@
},
{
"BriefDescription": "L2 lines allocated in the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E_STATE",
"SampleAfterValue": "100000",
@@ -197,6 +225,7 @@
},
{
"BriefDescription": "L2 lines allocated in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S_STATE",
"SampleAfterValue": "100000",
@@ -204,6 +233,7 @@
},
{
"BriefDescription": "L2 lines evicted",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.ANY",
"SampleAfterValue": "100000",
@@ -211,6 +241,7 @@
},
{
"BriefDescription": "L2 lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100000",
@@ -218,6 +249,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100000",
@@ -225,6 +257,7 @@
},
{
"BriefDescription": "L2 lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
"SampleAfterValue": "100000",
@@ -232,6 +265,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
"SampleAfterValue": "100000",
@@ -239,6 +273,7 @@
},
{
"BriefDescription": "L2 instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCHES",
"SampleAfterValue": "200000",
@@ -246,6 +281,7 @@
},
{
"BriefDescription": "L2 instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_HIT",
"SampleAfterValue": "200000",
@@ -253,6 +289,7 @@
},
{
"BriefDescription": "L2 instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_MISS",
"SampleAfterValue": "200000",
@@ -260,6 +297,7 @@
},
{
"BriefDescription": "L2 load hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_HIT",
"SampleAfterValue": "200000",
@@ -267,6 +305,7 @@
},
{
"BriefDescription": "L2 load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_MISS",
"SampleAfterValue": "200000",
@@ -274,6 +313,7 @@
},
{
"BriefDescription": "L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LOADS",
"SampleAfterValue": "200000",
@@ -281,6 +321,7 @@
},
{
"BriefDescription": "All L2 misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200000",
@@ -288,6 +329,7 @@
},
{
"BriefDescription": "All L2 prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCHES",
"SampleAfterValue": "200000",
@@ -295,6 +337,7 @@
},
{
"BriefDescription": "L2 prefetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_HIT",
"SampleAfterValue": "200000",
@@ -302,6 +345,7 @@
},
{
"BriefDescription": "L2 prefetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_MISS",
"SampleAfterValue": "200000",
@@ -309,6 +353,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200000",
@@ -316,6 +361,7 @@
},
{
"BriefDescription": "L2 RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFOS",
"SampleAfterValue": "200000",
@@ -323,6 +369,7 @@
},
{
"BriefDescription": "L2 RFO hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200000",
@@ -330,6 +377,7 @@
},
{
"BriefDescription": "L2 RFO misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200000",
@@ -337,6 +385,7 @@
},
{
"BriefDescription": "All L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.ANY",
"SampleAfterValue": "200000",
@@ -344,6 +393,7 @@
},
{
"BriefDescription": "L2 fill transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.FILL",
"SampleAfterValue": "200000",
@@ -351,6 +401,7 @@
},
{
"BriefDescription": "L2 instruction fetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.IFETCH",
"SampleAfterValue": "200000",
@@ -358,6 +409,7 @@
},
{
"BriefDescription": "L1D writeback to L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.L1D_WB",
"SampleAfterValue": "200000",
@@ -365,6 +417,7 @@
},
{
"BriefDescription": "L2 Load transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.LOAD",
"SampleAfterValue": "200000",
@@ -372,6 +425,7 @@
},
{
"BriefDescription": "L2 prefetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.PREFETCH",
"SampleAfterValue": "200000",
@@ -379,6 +433,7 @@
},
{
"BriefDescription": "L2 RFO transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.RFO",
"SampleAfterValue": "200000",
@@ -386,6 +441,7 @@
},
{
"BriefDescription": "L2 writeback to LLC transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.WB",
"SampleAfterValue": "200000",
@@ -393,6 +449,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.E_STATE",
"SampleAfterValue": "100000",
@@ -400,6 +457,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.HIT",
"SampleAfterValue": "100000",
@@ -407,6 +465,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.I_STATE",
"SampleAfterValue": "100000",
@@ -414,6 +473,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.MESI",
"SampleAfterValue": "100000",
@@ -421,6 +481,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.M_STATE",
"SampleAfterValue": "100000",
@@ -428,6 +489,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.S_STATE",
"SampleAfterValue": "100000",
@@ -435,6 +497,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.HIT",
"SampleAfterValue": "100000",
@@ -442,6 +505,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.I_STATE",
"SampleAfterValue": "100000",
@@ -449,6 +513,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.MESI",
"SampleAfterValue": "100000",
@@ -456,6 +521,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.M_STATE",
"SampleAfterValue": "100000",
@@ -463,6 +529,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.S_STATE",
"SampleAfterValue": "100000",
@@ -470,6 +537,7 @@
},
{
"BriefDescription": "Longest latency cache miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100000",
@@ -477,6 +545,7 @@
},
{
"BriefDescription": "Longest latency cache reference",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "200000",
@@ -484,6 +553,7 @@
},
{
"BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
"MSRIndex": "0x3F6",
@@ -493,6 +563,7 @@
},
{
"BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
"MSRIndex": "0x3F6",
@@ -503,6 +574,7 @@
},
{
"BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
"MSRIndex": "0x3F6",
@@ -513,6 +585,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
"MSRIndex": "0x3F6",
@@ -523,6 +596,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
"MSRIndex": "0x3F6",
@@ -533,6 +607,7 @@
},
{
"BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
"MSRIndex": "0x3F6",
@@ -543,6 +618,7 @@
},
{
"BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
"MSRIndex": "0x3F6",
@@ -553,6 +629,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
"MSRIndex": "0x3F6",
@@ -563,6 +640,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
"MSRIndex": "0x3F6",
@@ -573,6 +651,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
"MSRIndex": "0x3F6",
@@ -583,6 +662,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
"MSRIndex": "0x3F6",
@@ -593,6 +673,7 @@
},
{
"BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
"MSRIndex": "0x3F6",
@@ -603,6 +684,7 @@
},
{
"BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
"MSRIndex": "0x3F6",
@@ -613,6 +695,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
"MSRIndex": "0x3F6",
@@ -623,6 +706,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
"MSRIndex": "0x3F6",
@@ -633,6 +717,7 @@
},
{
"BriefDescription": "Instructions retired which contains a load (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LOADS",
"PEBS": "1",
@@ -641,6 +726,7 @@
},
{
"BriefDescription": "Instructions retired which contains a store (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.STORES",
"PEBS": "1",
@@ -649,6 +735,7 @@
},
{
"BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -657,6 +744,7 @@
},
{
"BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L1D_HIT",
"PEBS": "1",
@@ -665,6 +753,7 @@
},
{
"BriefDescription": "Retired loads that hit the L2 cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
"PEBS": "1",
@@ -673,6 +762,7 @@
},
{
"BriefDescription": "Retired loads that miss the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_MISS",
"PEBS": "1",
@@ -681,6 +771,7 @@
},
{
"BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
"PEBS": "1",
@@ -689,6 +780,7 @@
},
{
"BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
"PEBS": "1",
@@ -697,6 +789,7 @@
},
{
"BriefDescription": "All offcore requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY",
"SampleAfterValue": "100000",
@@ -704,6 +797,7 @@
},
{
"BriefDescription": "Offcore read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY.READ",
"SampleAfterValue": "100000",
@@ -711,6 +805,7 @@
},
{
"BriefDescription": "Offcore RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY.RFO",
"SampleAfterValue": "100000",
@@ -718,6 +813,7 @@
},
{
"BriefDescription": "Offcore demand code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.READ_CODE",
"SampleAfterValue": "100000",
@@ -725,6 +821,7 @@
},
{
"BriefDescription": "Offcore demand data read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.READ_DATA",
"SampleAfterValue": "100000",
@@ -732,6 +829,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.RFO",
"SampleAfterValue": "100000",
@@ -739,6 +837,7 @@
},
{
"BriefDescription": "Offcore L1 data cache writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
"SampleAfterValue": "100000",
@@ -746,6 +845,7 @@
},
{
"BriefDescription": "Outstanding offcore reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ",
"SampleAfterValue": "2000000",
@@ -753,6 +853,7 @@
},
{
"BriefDescription": "Cycles offcore reads busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ_NOT_EMPTY",
@@ -761,6 +862,7 @@
},
{
"BriefDescription": "Outstanding offcore demand code reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE",
"SampleAfterValue": "2000000",
@@ -768,6 +870,7 @@
},
{
"BriefDescription": "Cycles offcore demand code read busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE_NOT_EMPTY",
@@ -776,6 +879,7 @@
},
{
"BriefDescription": "Outstanding offcore demand data reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA",
"SampleAfterValue": "2000000",
@@ -783,6 +887,7 @@
},
{
"BriefDescription": "Cycles offcore demand data read busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA_NOT_EMPTY",
@@ -791,6 +896,7 @@
},
{
"BriefDescription": "Outstanding offcore demand RFOs",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO",
"SampleAfterValue": "2000000",
@@ -798,6 +904,7 @@
},
{
"BriefDescription": "Cycles offcore demand RFOs busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO_NOT_EMPTY",
@@ -806,6 +913,7 @@
},
{
"BriefDescription": "Offcore requests blocked due to Super Queue full",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_SQ_FULL",
"SampleAfterValue": "100000",
@@ -813,6 +921,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -822,6 +931,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -831,6 +941,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -840,6 +951,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -849,6 +961,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -858,6 +971,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -867,6 +981,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -876,6 +991,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -885,6 +1001,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -894,6 +1011,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -903,6 +1021,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -912,6 +1031,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -921,6 +1041,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -930,6 +1051,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -939,6 +1061,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -948,6 +1071,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -957,6 +1081,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -966,6 +1091,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -975,6 +1101,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -984,6 +1111,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -993,6 +1121,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1002,6 +1131,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1011,6 +1141,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1020,6 +1151,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1029,6 +1161,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1038,6 +1171,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1047,6 +1181,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1056,6 +1191,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1065,6 +1201,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1074,6 +1211,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1083,6 +1221,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1092,6 +1231,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1101,6 +1241,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1110,6 +1251,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1119,6 +1261,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1128,6 +1271,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1137,6 +1281,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1146,6 +1291,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1155,6 +1301,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1164,6 +1311,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1173,6 +1321,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1182,6 +1331,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1191,6 +1341,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1200,6 +1351,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1209,6 +1361,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1218,6 +1371,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1227,6 +1381,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1236,6 +1391,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1245,6 +1401,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1254,6 +1411,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1263,6 +1421,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1272,6 +1431,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1281,6 +1441,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1290,6 +1451,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1299,6 +1461,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1308,6 +1471,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1317,6 +1481,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1326,6 +1491,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1335,6 +1501,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1344,6 +1511,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1353,6 +1521,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1362,6 +1531,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1371,6 +1541,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1380,6 +1551,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1389,6 +1561,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1398,6 +1571,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1407,6 +1581,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1416,6 +1591,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1425,6 +1601,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1434,6 +1611,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1443,6 +1621,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1452,6 +1631,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1461,6 +1641,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1470,6 +1651,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1479,6 +1661,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1488,6 +1671,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1497,6 +1681,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1506,6 +1691,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1515,6 +1701,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1524,6 +1711,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1533,6 +1721,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1542,6 +1731,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1551,6 +1741,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1560,6 +1751,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1569,6 +1761,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1578,6 +1771,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1587,6 +1781,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1596,6 +1791,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1605,6 +1801,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1614,6 +1811,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1623,6 +1821,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1632,6 +1831,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1641,6 +1841,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1650,6 +1851,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1659,6 +1861,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1668,6 +1871,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1677,6 +1881,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1686,6 +1891,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1695,6 +1901,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1704,6 +1911,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1713,6 +1921,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1722,6 +1931,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1731,6 +1941,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1740,6 +1951,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1749,6 +1961,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1758,6 +1971,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1767,6 +1981,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1776,6 +1991,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1785,6 +2001,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1794,6 +2011,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1803,6 +2021,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1812,6 +2031,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1821,6 +2041,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1830,6 +2051,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1839,6 +2061,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1848,6 +2071,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1857,6 +2081,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1866,6 +2091,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1875,6 +2101,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1884,6 +2111,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1893,6 +2121,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1902,6 +2131,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1911,6 +2141,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1920,6 +2151,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1929,6 +2161,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1938,6 +2171,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1947,6 +2181,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1956,6 +2191,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1965,6 +2201,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1974,6 +2211,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1983,6 +2221,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1992,6 +2231,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2001,6 +2241,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2010,6 +2251,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2019,6 +2261,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2028,6 +2271,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2037,6 +2281,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2046,6 +2291,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2055,6 +2301,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2064,6 +2311,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2073,6 +2321,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2082,6 +2331,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2091,6 +2341,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2100,6 +2351,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2109,6 +2361,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2118,6 +2371,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2127,6 +2381,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2136,6 +2391,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2145,6 +2401,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2154,6 +2411,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2163,6 +2421,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2172,6 +2431,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2181,6 +2441,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2190,6 +2451,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2199,6 +2461,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2208,6 +2471,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2217,6 +2481,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2226,6 +2491,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2235,6 +2501,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2244,6 +2511,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2253,6 +2521,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2262,6 +2531,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2271,6 +2541,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = ANY_LOCATION",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2280,6 +2551,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = IO_CSR_MMIO",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2289,6 +2561,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2298,6 +2571,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2307,6 +2581,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2316,6 +2591,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = LOCAL_CACHE",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2325,6 +2601,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2334,6 +2611,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2343,6 +2621,7 @@
},
{
"BriefDescription": "Super Queue LRU hints sent to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.LRU_HINTS",
"SampleAfterValue": "2000000",
@@ -2350,6 +2629,7 @@
},
{
"BriefDescription": "Super Queue lock splits across a cache line",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "2000000",
@@ -2357,6 +2637,7 @@
},
{
"BriefDescription": "Loads delayed with at-Retirement block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.AT_RET",
"SampleAfterValue": "200000",
@@ -2364,6 +2645,7 @@
},
{
"BriefDescription": "Cacheable loads delayed with L1D block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.L1D_BLOCK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/counter.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/counter.json
new file mode 100644
index 000000000000..ecf0795dceab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "4"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
index 196ae1d9b157..9bac9313b65c 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "X87 Floating point assists (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.ALL",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.OUTPUT",
"PEBS": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "MMX Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.MMX",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "SSE2 integer Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "SSE* FP double precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "SSE and SSE2 FP Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "SSE FP packed Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "SSE FP scalar Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "SSE* FP single precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Computational floating-point operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "All Floating Point to and from MMX transitions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.ANY",
"SampleAfterValue": "2000000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Transitions from MMX to Floating Point instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_FP",
"SampleAfterValue": "2000000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Transitions from Floating Point to MMX instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_MMX",
"SampleAfterValue": "2000000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "128 bit SIMD integer pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACK",
"SampleAfterValue": "200000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "128 bit SIMD integer arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "128 bit SIMD integer logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "128 bit SIMD integer multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "128 bit SIMD integer unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.UNPACK",
"SampleAfterValue": "200000",
@@ -151,6 +172,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACK",
"SampleAfterValue": "200000",
@@ -158,6 +180,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -165,6 +188,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -172,6 +196,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit packed multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -179,6 +204,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -186,6 +212,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -193,6 +220,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.UNPACK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json
index f7f28510e3ae..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MACRO_INSTS.DECODED",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Macro-fused instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "MACRO_INSTS.FUSIONS_DECODED",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Two Uop instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "TWO_UOP_INSTS_DECODED",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json
index 7085c3307c91..dcf1bf3f880d 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Misaligned store references",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.STORE",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "REQUEST = ANY RFO and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "REQUEST = CORE_WB and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -251,6 +279,7 @@
},
{
"BriefDescription": "REQUEST = DATA_IN and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -260,6 +289,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -269,6 +299,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -278,6 +309,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -287,6 +319,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -296,6 +329,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -305,6 +339,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -314,6 +349,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -323,6 +359,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -332,6 +369,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -341,6 +379,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -350,6 +389,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -359,6 +399,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -368,6 +409,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -377,6 +419,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -386,6 +429,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -395,6 +439,7 @@
},
{
"BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -404,6 +449,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -413,6 +459,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -422,6 +469,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -431,6 +479,7 @@
},
{
"BriefDescription": "REQUEST = OTHER and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -440,6 +489,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -449,6 +499,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -458,6 +509,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -467,6 +519,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -476,6 +529,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -485,6 +539,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -494,6 +549,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -503,6 +559,7 @@
},
{
"BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -512,6 +569,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -521,6 +579,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -530,6 +589,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -539,6 +599,7 @@
},
{
"BriefDescription": "REQUEST = PF_RFO and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -548,6 +609,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -557,6 +619,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -566,6 +629,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -575,6 +639,7 @@
},
{
"BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -584,6 +649,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM_AND_REMOTE_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -593,6 +659,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = ANY_LLC_MISS",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -602,6 +669,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.OTHER_LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -611,6 +679,7 @@
},
{
"BriefDescription": "REQUEST = PREFETCH and RESPONSE = REMOTE_DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
index 488274980564..bcf5bcf637c0 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ES segment renames",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "ES_REG_RENAMES",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "I/O transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "IO_TRANSACTIONS",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.CYCLES_STALLED",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.HITS",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.MISSES",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.READS",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "LARGE_ITLB.HIT",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "Loads that partially overlap an earlier store",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "LOAD_BLOCK.OVERLAP_STORE",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "All loads dispatched",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.ANY",
"SampleAfterValue": "2000000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "Loads dispatched from the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.MOB",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Loads dispatched that bypass the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS",
"SampleAfterValue": "2000000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "Loads dispatched from stage 305",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS_DELAYED",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "False dependencies due to partial address aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "PARTIAL_ADDRESS_ALIAS",
"SampleAfterValue": "200000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "All Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "SB_DRAIN.ANY",
"SampleAfterValue": "200000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "Segment rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "SEG_RENAME_STALLS",
"SampleAfterValue": "2000000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "Snoop code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.CODE",
"SampleAfterValue": "100000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "Snoop data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.DATA",
"SampleAfterValue": "100000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "Snoop invalidate requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.INVALIDATE",
"SampleAfterValue": "100000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "Outstanding snoop code requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE",
"SampleAfterValue": "2000000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "Cycles snoop code requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE_NOT_EMPTY",
@@ -142,6 +162,7 @@
},
{
"BriefDescription": "Outstanding snoop data requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA",
"SampleAfterValue": "2000000",
@@ -149,6 +170,7 @@
},
{
"BriefDescription": "Cycles snoop data requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA_NOT_EMPTY",
@@ -157,6 +179,7 @@
},
{
"BriefDescription": "Outstanding snoop invalidate requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE",
"SampleAfterValue": "2000000",
@@ -164,6 +187,7 @@
},
{
"BriefDescription": "Cycles snoop invalidate requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE_NOT_EMPTY",
@@ -172,6 +196,7 @@
},
{
"BriefDescription": "Thread responded HIT to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HIT",
"SampleAfterValue": "100000",
@@ -179,6 +204,7 @@
},
{
"BriefDescription": "Thread responded HITE to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITE",
"SampleAfterValue": "100000",
@@ -186,6 +212,7 @@
},
{
"BriefDescription": "Thread responded HITM to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITM",
"SampleAfterValue": "100000",
@@ -193,6 +220,7 @@
},
{
"BriefDescription": "Super Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xF6",
"EventName": "SQ_FULL_STALL_CYCLES",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json
index a29ed3522779..0267788d9dce 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles the divider is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Divide Operations executed",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Multiply operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.MUL",
"SampleAfterValue": "2000000",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "BACLEAR asserted with bad target address",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.BAD_TARGET",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "BACLEAR asserted, regardless of cause",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.CLEAR",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Instruction queue forced BACLEAR",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "BACLEAR_FORCE_IQ",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Early Branch Prediciton Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.EARLY",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.LATE",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "BPU_MISSED_CALL_RET",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Branch instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ANY",
"SampleAfterValue": "200000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "Conditional branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.COND",
"SampleAfterValue": "200000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT",
"SampleAfterValue": "200000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Unconditional call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "Indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "Indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "20000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "Call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NEAR_CALLS",
"SampleAfterValue": "20000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "All non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NON_CALLS",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "Indirect return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.RETURN_NEAR",
"SampleAfterValue": "20000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "Taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "Retired branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -152,6 +173,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -160,6 +182,7 @@
},
{
"BriefDescription": "Retired near call instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -168,6 +191,7 @@
},
{
"BriefDescription": "Mispredicted branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ANY",
"SampleAfterValue": "20000",
@@ -175,6 +199,7 @@
},
{
"BriefDescription": "Mispredicted conditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.COND",
"SampleAfterValue": "20000",
@@ -182,6 +207,7 @@
},
{
"BriefDescription": "Mispredicted unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT",
"SampleAfterValue": "20000",
@@ -189,6 +215,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -196,6 +223,7 @@
},
{
"BriefDescription": "Mispredicted indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -203,6 +231,7 @@
},
{
"BriefDescription": "Mispredicted indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "2000",
@@ -210,6 +239,7 @@
},
{
"BriefDescription": "Mispredicted call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NEAR_CALLS",
"SampleAfterValue": "2000",
@@ -217,6 +247,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NON_CALLS",
"SampleAfterValue": "20000",
@@ -224,6 +255,7 @@
},
{
"BriefDescription": "Mispredicted return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.RETURN_NEAR",
"SampleAfterValue": "2000",
@@ -231,6 +263,7 @@
},
{
"BriefDescription": "Mispredicted taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN",
"SampleAfterValue": "20000",
@@ -238,6 +271,7 @@
},
{
"BriefDescription": "Mispredicted retired branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -246,6 +280,7 @@
},
{
"BriefDescription": "Mispredicted conditional retired branches (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -254,6 +289,7 @@
},
{
"BriefDescription": "Mispredicted near retired calls (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -262,11 +298,13 @@
},
{
"BriefDescription": "Reference cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 3",
"EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_P",
"SampleAfterValue": "100000",
@@ -274,17 +312,20 @@
},
{
"BriefDescription": "Cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Total CPU cycles",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
@@ -293,6 +334,7 @@
},
{
"BriefDescription": "Any Instruction Length Decoder stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.ANY",
"SampleAfterValue": "2000000",
@@ -300,6 +342,7 @@
},
{
"BriefDescription": "Instruction Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
@@ -307,6 +350,7 @@
},
{
"BriefDescription": "Length Change Prefix stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000000",
@@ -314,6 +358,7 @@
},
{
"BriefDescription": "Stall cycles due to BPU MRU bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.MRU",
"SampleAfterValue": "2000000",
@@ -321,6 +366,7 @@
},
{
"BriefDescription": "Regen stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.REGEN",
"SampleAfterValue": "2000000",
@@ -328,6 +374,7 @@
},
{
"BriefDescription": "Instructions that must be decoded by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "INST_DECODED.DEC0",
"SampleAfterValue": "2000000",
@@ -335,6 +382,7 @@
},
{
"BriefDescription": "Instructions written to instruction queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "INST_QUEUE_WRITES",
"SampleAfterValue": "2000000",
@@ -342,6 +390,7 @@
},
{
"BriefDescription": "Cycles instructions are written to the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "INST_QUEUE_WRITE_CYCLES",
"SampleAfterValue": "2000000",
@@ -349,11 +398,13 @@
},
{
"BriefDescription": "Instructions retired (fixed counter)",
+ "Counter": "Fixed counter 1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Instructions retired (Programmable counter and Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -362,6 +413,7 @@
},
{
"BriefDescription": "Retired MMX instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.MMX",
"PEBS": "1",
@@ -370,6 +422,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES",
@@ -380,6 +433,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
@@ -390,6 +444,7 @@
},
{
"BriefDescription": "Retired floating-point operations (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PEBS": "1",
@@ -398,6 +453,7 @@
},
{
"BriefDescription": "Load operations conflicting with software prefetches",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE",
"SampleAfterValue": "200000",
@@ -405,6 +461,7 @@
},
{
"BriefDescription": "Cycles when uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.ACTIVE",
@@ -413,6 +470,7 @@
},
{
"BriefDescription": "Cycles no uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.INACTIVE",
@@ -422,6 +480,7 @@
},
{
"BriefDescription": "Loops that can't stream from the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "LSD_OVERFLOW",
"SampleAfterValue": "2000000",
@@ -429,6 +488,7 @@
},
{
"BriefDescription": "Cycles machine clear asserted",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "20000",
@@ -436,6 +496,7 @@
},
{
"BriefDescription": "Execution pipeline restart due to Memory ordering conflicts",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEM_ORDER",
"SampleAfterValue": "20000",
@@ -443,6 +504,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20000",
@@ -450,6 +512,7 @@
},
{
"BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -457,6 +520,7 @@
},
{
"BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.FLAGS",
"SampleAfterValue": "2000000",
@@ -464,6 +528,7 @@
},
{
"BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.REGISTERS",
"SampleAfterValue": "2000000",
@@ -471,6 +536,7 @@
},
{
"BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ROB_READ_PORT",
"SampleAfterValue": "2000000",
@@ -478,6 +544,7 @@
},
{
"BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.SCOREBOARD",
"SampleAfterValue": "2000000",
@@ -485,6 +552,7 @@
},
{
"BriefDescription": "Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -492,6 +560,7 @@
},
{
"BriefDescription": "FPU control word write stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.FPCW",
"SampleAfterValue": "2000000",
@@ -499,6 +568,7 @@
},
{
"BriefDescription": "Load buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LOAD",
"SampleAfterValue": "2000000",
@@ -506,6 +576,7 @@
},
{
"BriefDescription": "MXCSR rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.MXCSR",
"SampleAfterValue": "2000000",
@@ -513,6 +584,7 @@
},
{
"BriefDescription": "Other Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.OTHER",
"SampleAfterValue": "2000000",
@@ -520,6 +592,7 @@
},
{
"BriefDescription": "ROB full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB_FULL",
"SampleAfterValue": "2000000",
@@ -527,6 +600,7 @@
},
{
"BriefDescription": "Reservation Station full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS_FULL",
"SampleAfterValue": "2000000",
@@ -534,6 +608,7 @@
},
{
"BriefDescription": "Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.STORE",
"SampleAfterValue": "2000000",
@@ -541,6 +616,7 @@
},
{
"BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
"PEBS": "1",
@@ -549,6 +625,7 @@
},
{
"BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
"PEBS": "1",
@@ -557,6 +634,7 @@
},
{
"BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
"PEBS": "1",
@@ -565,6 +643,7 @@
},
{
"BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
"PEBS": "1",
@@ -573,6 +652,7 @@
},
{
"BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
"PEBS": "1",
@@ -581,6 +661,7 @@
},
{
"BriefDescription": "Stack pointer instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_FOLDING",
"SampleAfterValue": "2000000",
@@ -588,6 +669,7 @@
},
{
"BriefDescription": "Stack pointer sync operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_SYNC",
"SampleAfterValue": "2000000",
@@ -595,6 +677,7 @@
},
{
"BriefDescription": "Uops decoded by Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
@@ -603,6 +686,7 @@
},
{
"BriefDescription": "Cycles no Uops are decoded",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.STALL_CYCLES",
@@ -613,6 +697,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
@@ -622,6 +707,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
@@ -631,6 +717,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -642,6 +729,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -653,6 +741,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
@@ -663,6 +752,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
@@ -672,6 +762,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT0",
"SampleAfterValue": "2000000",
@@ -679,6 +770,7 @@
},
{
"BriefDescription": "Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015",
"SampleAfterValue": "2000000",
@@ -686,6 +778,7 @@
},
{
"BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
@@ -695,6 +788,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT1",
"SampleAfterValue": "2000000",
@@ -703,6 +797,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops issued on ports 2, 3 or 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT234_CORE",
"SampleAfterValue": "2000000",
@@ -711,6 +806,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 2 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT2_CORE",
"SampleAfterValue": "2000000",
@@ -719,6 +815,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 3 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT3_CORE",
"SampleAfterValue": "2000000",
@@ -727,6 +824,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 4 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT4_CORE",
"SampleAfterValue": "2000000",
@@ -734,6 +832,7 @@
},
{
"BriefDescription": "Uops executed on port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT5",
"SampleAfterValue": "2000000",
@@ -741,6 +840,7 @@
},
{
"BriefDescription": "Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000000",
@@ -749,6 +849,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops were issued on any thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -759,6 +860,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops were issued on either thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
@@ -767,6 +869,7 @@
},
{
"BriefDescription": "Fused Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.FUSED",
"SampleAfterValue": "2000000",
@@ -774,6 +877,7 @@
},
{
"BriefDescription": "Cycles no Uops were issued",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -783,6 +887,7 @@
},
{
"BriefDescription": "Cycles Uops are being retired",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
@@ -792,6 +897,7 @@
},
{
"BriefDescription": "Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
"PEBS": "1",
@@ -800,6 +906,7 @@
},
{
"BriefDescription": "Macro-fused Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -808,6 +915,7 @@
},
{
"BriefDescription": "Retirement slots used (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -816,6 +924,7 @@
},
{
"BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -826,6 +935,7 @@
},
{
"BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
@@ -836,6 +946,7 @@
},
{
"BriefDescription": "Uop unfusions due to FP exceptions",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UOP_UNFUSION",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
index f75084309041..53d7f76325a3 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DTLB load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "DTLB load miss large page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.LARGE_WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "DTLB load miss caused by low part of address",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "DTLB second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "DTLB load miss page walks complete",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "DTLB load miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_CYCLES",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "DTLB misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "DTLB miss large page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.LARGE_WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "DTLB misses caused by low part of address",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "DTLB first level misses but second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.STLB_HIT",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "DTLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "DTLB miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "Extended Page Table walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "ITLB flushes",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB_FLUSH",
"SampleAfterValue": "2000000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "ITLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "ITLB miss large page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.LARGE_WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "ITLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "ITLB miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "Retired instructions that missed the ITLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "ITLB_MISS_RETIRED",
"PEBS": "1",
@@ -135,6 +154,7 @@
},
{
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
"PEBS": "1",
@@ -143,6 +163,7 @@
},
{
"BriefDescription": "Retired stores that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "MEM_STORE_RETIRED.DTLB_MISS",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
index d025e2c0cf1c..90cb367f5798 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles L1D locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles L1D and L2 locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D_L2",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1D cache lines replaced in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_EVICT",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1D cache lines allocated in the M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_REPL",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1D snoop eviction of cache lines in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_SNOOP_EVICT",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1 data cache lines allocated",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.REPL",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "L1D prefetch load lock accepted in fill buffer",
+ "Counter": "0,1",
"EventCode": "0x52",
"EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "L1D hardware prefetch misses",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.MISS",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.REQUESTS",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests triggered",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.TRIGGERS",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.E_STATE",
"SampleAfterValue": "100000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.I_STATE",
"SampleAfterValue": "100000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "All L1 writebacks to L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.MESI",
"SampleAfterValue": "100000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.M_STATE",
"SampleAfterValue": "100000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.S_STATE",
"SampleAfterValue": "100000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "All L2 data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.ANY",
"SampleAfterValue": "200000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "L2 data demand loads in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
"SampleAfterValue": "200000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "L2 data demand loads in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
"SampleAfterValue": "200000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "L2 data demand requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.MESI",
"SampleAfterValue": "200000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "L2 data demand loads in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
"SampleAfterValue": "200000",
@@ -141,6 +161,7 @@
},
{
"BriefDescription": "L2 data demand loads in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
"SampleAfterValue": "200000",
@@ -148,6 +169,7 @@
},
{
"BriefDescription": "L2 data prefetches in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
@@ -155,6 +177,7 @@
},
{
"BriefDescription": "L2 data prefetches in the I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
@@ -162,6 +185,7 @@
},
{
"BriefDescription": "All L2 data prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
"SampleAfterValue": "200000",
@@ -169,6 +193,7 @@
},
{
"BriefDescription": "L2 data prefetches in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
@@ -176,6 +201,7 @@
},
{
"BriefDescription": "L2 data prefetches in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
@@ -183,6 +209,7 @@
},
{
"BriefDescription": "L2 lines allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
@@ -190,6 +217,7 @@
},
{
"BriefDescription": "L2 lines allocated in the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E_STATE",
"SampleAfterValue": "100000",
@@ -197,6 +225,7 @@
},
{
"BriefDescription": "L2 lines allocated in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S_STATE",
"SampleAfterValue": "100000",
@@ -204,6 +233,7 @@
},
{
"BriefDescription": "L2 lines evicted",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.ANY",
"SampleAfterValue": "100000",
@@ -211,6 +241,7 @@
},
{
"BriefDescription": "L2 lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100000",
@@ -218,6 +249,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100000",
@@ -225,6 +257,7 @@
},
{
"BriefDescription": "L2 lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
"SampleAfterValue": "100000",
@@ -232,6 +265,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
"SampleAfterValue": "100000",
@@ -239,6 +273,7 @@
},
{
"BriefDescription": "L2 instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCHES",
"SampleAfterValue": "200000",
@@ -246,6 +281,7 @@
},
{
"BriefDescription": "L2 instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_HIT",
"SampleAfterValue": "200000",
@@ -253,6 +289,7 @@
},
{
"BriefDescription": "L2 instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_MISS",
"SampleAfterValue": "200000",
@@ -260,6 +297,7 @@
},
{
"BriefDescription": "L2 load hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_HIT",
"SampleAfterValue": "200000",
@@ -267,6 +305,7 @@
},
{
"BriefDescription": "L2 load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_MISS",
"SampleAfterValue": "200000",
@@ -274,6 +313,7 @@
},
{
"BriefDescription": "L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LOADS",
"SampleAfterValue": "200000",
@@ -281,6 +321,7 @@
},
{
"BriefDescription": "All L2 misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200000",
@@ -288,6 +329,7 @@
},
{
"BriefDescription": "All L2 prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCHES",
"SampleAfterValue": "200000",
@@ -295,6 +337,7 @@
},
{
"BriefDescription": "L2 prefetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_HIT",
"SampleAfterValue": "200000",
@@ -302,6 +345,7 @@
},
{
"BriefDescription": "L2 prefetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_MISS",
"SampleAfterValue": "200000",
@@ -309,6 +353,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200000",
@@ -316,6 +361,7 @@
},
{
"BriefDescription": "L2 RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFOS",
"SampleAfterValue": "200000",
@@ -323,6 +369,7 @@
},
{
"BriefDescription": "L2 RFO hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200000",
@@ -330,6 +377,7 @@
},
{
"BriefDescription": "L2 RFO misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200000",
@@ -337,6 +385,7 @@
},
{
"BriefDescription": "All L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.ANY",
"SampleAfterValue": "200000",
@@ -344,6 +393,7 @@
},
{
"BriefDescription": "L2 fill transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.FILL",
"SampleAfterValue": "200000",
@@ -351,6 +401,7 @@
},
{
"BriefDescription": "L2 instruction fetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.IFETCH",
"SampleAfterValue": "200000",
@@ -358,6 +409,7 @@
},
{
"BriefDescription": "L1D writeback to L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.L1D_WB",
"SampleAfterValue": "200000",
@@ -365,6 +417,7 @@
},
{
"BriefDescription": "L2 Load transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.LOAD",
"SampleAfterValue": "200000",
@@ -372,6 +425,7 @@
},
{
"BriefDescription": "L2 prefetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.PREFETCH",
"SampleAfterValue": "200000",
@@ -379,6 +433,7 @@
},
{
"BriefDescription": "L2 RFO transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.RFO",
"SampleAfterValue": "200000",
@@ -386,6 +441,7 @@
},
{
"BriefDescription": "L2 writeback to LLC transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.WB",
"SampleAfterValue": "200000",
@@ -393,6 +449,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.E_STATE",
"SampleAfterValue": "100000",
@@ -400,6 +457,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.HIT",
"SampleAfterValue": "100000",
@@ -407,6 +465,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.I_STATE",
"SampleAfterValue": "100000",
@@ -414,6 +473,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.MESI",
"SampleAfterValue": "100000",
@@ -421,6 +481,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.M_STATE",
"SampleAfterValue": "100000",
@@ -428,6 +489,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.S_STATE",
"SampleAfterValue": "100000",
@@ -435,6 +497,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.HIT",
"SampleAfterValue": "100000",
@@ -442,6 +505,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.I_STATE",
"SampleAfterValue": "100000",
@@ -449,6 +513,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.MESI",
"SampleAfterValue": "100000",
@@ -456,6 +521,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.M_STATE",
"SampleAfterValue": "100000",
@@ -463,6 +529,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.S_STATE",
"SampleAfterValue": "100000",
@@ -470,6 +537,7 @@
},
{
"BriefDescription": "Longest latency cache miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100000",
@@ -477,6 +545,7 @@
},
{
"BriefDescription": "Longest latency cache reference",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "200000",
@@ -484,6 +553,7 @@
},
{
"BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
"MSRIndex": "0x3F6",
@@ -493,6 +563,7 @@
},
{
"BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
"MSRIndex": "0x3F6",
@@ -503,6 +574,7 @@
},
{
"BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
"MSRIndex": "0x3F6",
@@ -513,6 +585,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
"MSRIndex": "0x3F6",
@@ -523,6 +596,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
"MSRIndex": "0x3F6",
@@ -533,6 +607,7 @@
},
{
"BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
"MSRIndex": "0x3F6",
@@ -543,6 +618,7 @@
},
{
"BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
"MSRIndex": "0x3F6",
@@ -553,6 +629,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
"MSRIndex": "0x3F6",
@@ -563,6 +640,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
"MSRIndex": "0x3F6",
@@ -573,6 +651,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
"MSRIndex": "0x3F6",
@@ -583,6 +662,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
"MSRIndex": "0x3F6",
@@ -593,6 +673,7 @@
},
{
"BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
"MSRIndex": "0x3F6",
@@ -603,6 +684,7 @@
},
{
"BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
"MSRIndex": "0x3F6",
@@ -613,6 +695,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
"MSRIndex": "0x3F6",
@@ -623,6 +706,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
"MSRIndex": "0x3F6",
@@ -633,6 +717,7 @@
},
{
"BriefDescription": "Instructions retired which contains a load (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LOADS",
"PEBS": "1",
@@ -641,6 +726,7 @@
},
{
"BriefDescription": "Instructions retired which contains a store (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.STORES",
"PEBS": "1",
@@ -649,6 +735,7 @@
},
{
"BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -657,6 +744,7 @@
},
{
"BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L1D_HIT",
"PEBS": "1",
@@ -665,6 +753,7 @@
},
{
"BriefDescription": "Retired loads that hit the L2 cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
"PEBS": "1",
@@ -673,6 +762,7 @@
},
{
"BriefDescription": "Retired loads that miss the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_MISS",
"PEBS": "1",
@@ -681,6 +771,7 @@
},
{
"BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
"PEBS": "1",
@@ -689,6 +780,7 @@
},
{
"BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
"PEBS": "1",
@@ -697,6 +789,7 @@
},
{
"BriefDescription": "Load instructions retired with a data source of local DRAM or locally homed remote hitm (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.LOCAL_DRAM",
"PEBS": "1",
@@ -705,6 +798,7 @@
},
{
"BriefDescription": "Load instructions retired that HIT modified data in sibling core (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.OTHER_CORE_L2_HITM",
"PEBS": "1",
@@ -713,6 +807,7 @@
},
{
"BriefDescription": "Load instructions retired remote cache HIT data source (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT",
"PEBS": "1",
@@ -721,6 +816,7 @@
},
{
"BriefDescription": "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.REMOTE_DRAM",
"PEBS": "1",
@@ -729,6 +825,7 @@
},
{
"BriefDescription": "Load instructions retired IO (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.UNCACHEABLE",
"PEBS": "1",
@@ -737,6 +834,7 @@
},
{
"BriefDescription": "All offcore requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY",
"SampleAfterValue": "100000",
@@ -744,6 +842,7 @@
},
{
"BriefDescription": "Offcore read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY.READ",
"SampleAfterValue": "100000",
@@ -751,6 +850,7 @@
},
{
"BriefDescription": "Offcore RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY.RFO",
"SampleAfterValue": "100000",
@@ -758,6 +858,7 @@
},
{
"BriefDescription": "Offcore demand code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.READ_CODE",
"SampleAfterValue": "100000",
@@ -765,6 +866,7 @@
},
{
"BriefDescription": "Offcore demand data read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.READ_DATA",
"SampleAfterValue": "100000",
@@ -772,6 +874,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.RFO",
"SampleAfterValue": "100000",
@@ -779,6 +882,7 @@
},
{
"BriefDescription": "Offcore L1 data cache writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
"SampleAfterValue": "100000",
@@ -786,6 +890,7 @@
},
{
"BriefDescription": "Offcore uncached memory accesses",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.UNCACHED_MEM",
"SampleAfterValue": "100000",
@@ -793,6 +898,7 @@
},
{
"BriefDescription": "Outstanding offcore reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ",
"SampleAfterValue": "2000000",
@@ -800,6 +906,7 @@
},
{
"BriefDescription": "Cycles offcore reads busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ_NOT_EMPTY",
@@ -808,6 +915,7 @@
},
{
"BriefDescription": "Outstanding offcore demand code reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE",
"SampleAfterValue": "2000000",
@@ -815,6 +923,7 @@
},
{
"BriefDescription": "Cycles offcore demand code read busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE_NOT_EMPTY",
@@ -823,6 +932,7 @@
},
{
"BriefDescription": "Outstanding offcore demand data reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA",
"SampleAfterValue": "2000000",
@@ -830,6 +940,7 @@
},
{
"BriefDescription": "Cycles offcore demand data read busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA_NOT_EMPTY",
@@ -838,6 +949,7 @@
},
{
"BriefDescription": "Outstanding offcore demand RFOs",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO",
"SampleAfterValue": "2000000",
@@ -845,6 +957,7 @@
},
{
"BriefDescription": "Cycles offcore demand RFOs busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO_NOT_EMPTY",
@@ -853,6 +966,7 @@
},
{
"BriefDescription": "Offcore requests blocked due to Super Queue full",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_SQ_FULL",
"SampleAfterValue": "100000",
@@ -860,6 +974,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -869,6 +984,7 @@
},
{
"BriefDescription": "All offcore data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -878,6 +994,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -887,6 +1004,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -896,6 +1014,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -905,6 +1024,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -914,6 +1034,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -923,6 +1044,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -932,6 +1054,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -941,6 +1064,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -950,6 +1074,7 @@
},
{
"BriefDescription": "Offcore data reads that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -959,6 +1084,7 @@
},
{
"BriefDescription": "Offcore data reads that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -968,6 +1094,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -977,6 +1104,7 @@
},
{
"BriefDescription": "All offcore code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -986,6 +1114,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -995,6 +1124,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1004,6 +1134,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1013,6 +1144,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1022,6 +1154,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1031,6 +1164,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1040,6 +1174,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1049,6 +1184,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1058,6 +1194,7 @@
},
{
"BriefDescription": "Offcore code reads that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1067,6 +1204,7 @@
},
{
"BriefDescription": "Offcore code reads that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1076,6 +1214,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by any cache or DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1085,6 +1224,7 @@
},
{
"BriefDescription": "All offcore requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1094,6 +1234,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1103,6 +1244,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1112,6 +1254,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1121,6 +1264,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1130,6 +1274,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1139,6 +1284,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1148,6 +1294,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1157,6 +1304,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1166,6 +1314,7 @@
},
{
"BriefDescription": "Offcore requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1175,6 +1324,7 @@
},
{
"BriefDescription": "Offcore requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1184,6 +1334,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1193,6 +1344,7 @@
},
{
"BriefDescription": "All offcore RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1202,6 +1354,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1211,6 +1364,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1220,6 +1374,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1229,6 +1384,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1238,6 +1394,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1247,6 +1404,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1256,6 +1414,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1265,6 +1424,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1274,6 +1434,7 @@
},
{
"BriefDescription": "Offcore RFO requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1283,6 +1444,7 @@
},
{
"BriefDescription": "Offcore RFO requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1292,6 +1454,7 @@
},
{
"BriefDescription": "Offcore writebacks to any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1301,6 +1464,7 @@
},
{
"BriefDescription": "All offcore writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1310,6 +1474,7 @@
},
{
"BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1319,6 +1484,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1328,6 +1494,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1337,6 +1504,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1346,6 +1514,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1355,6 +1524,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1364,6 +1534,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1373,6 +1544,7 @@
},
{
"BriefDescription": "Offcore writebacks that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1382,6 +1554,7 @@
},
{
"BriefDescription": "Offcore writebacks that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1391,6 +1564,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1400,6 +1574,7 @@
},
{
"BriefDescription": "All offcore code or data read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1409,6 +1584,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1418,6 +1594,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1427,6 +1604,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1436,6 +1614,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1445,6 +1624,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1454,6 +1634,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1463,6 +1644,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1472,6 +1654,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1481,6 +1664,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1490,6 +1674,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1499,6 +1684,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any cache_dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1508,6 +1694,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any location",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1517,6 +1704,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1526,6 +1714,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1535,6 +1724,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1544,6 +1734,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1553,6 +1744,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = local cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1562,6 +1754,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = local cache or dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1571,6 +1764,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1580,6 +1774,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = remote cache or dram",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1589,6 +1784,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1598,6 +1794,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1607,6 +1804,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1616,6 +1814,7 @@
},
{
"BriefDescription": "All offcore demand data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1625,6 +1824,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1634,6 +1834,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1643,6 +1844,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1652,6 +1854,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1661,6 +1864,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1670,6 +1874,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1679,6 +1884,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1688,6 +1894,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1697,6 +1904,7 @@
},
{
"BriefDescription": "Offcore demand data requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1706,6 +1914,7 @@
},
{
"BriefDescription": "Offcore demand data requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1715,6 +1924,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1724,6 +1934,7 @@
},
{
"BriefDescription": "All offcore demand data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1733,6 +1944,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1742,6 +1954,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1751,6 +1964,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1760,6 +1974,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1769,6 +1984,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1778,6 +1994,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1787,6 +2004,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1796,6 +2014,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1805,6 +2024,7 @@
},
{
"BriefDescription": "Offcore demand data reads that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1814,6 +2034,7 @@
},
{
"BriefDescription": "Offcore demand data reads that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1823,6 +2044,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1832,6 +2054,7 @@
},
{
"BriefDescription": "All offcore demand code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1841,6 +2064,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1850,6 +2074,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1859,6 +2084,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1868,6 +2094,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1877,6 +2104,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1886,6 +2114,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1895,6 +2124,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1904,6 +2134,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1913,6 +2144,7 @@
},
{
"BriefDescription": "Offcore demand code reads that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1922,6 +2154,7 @@
},
{
"BriefDescription": "Offcore demand code reads that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1931,6 +2164,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1940,6 +2174,7 @@
},
{
"BriefDescription": "All offcore demand RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -1949,6 +2184,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -1958,6 +2194,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1967,6 +2204,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -1976,6 +2214,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1985,6 +2224,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -1994,6 +2234,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2003,6 +2244,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2012,6 +2254,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2021,6 +2264,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2030,6 +2274,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2039,6 +2284,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2048,6 +2294,7 @@
},
{
"BriefDescription": "All offcore other requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2057,6 +2304,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2066,6 +2314,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2075,6 +2324,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2084,6 +2334,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2093,6 +2344,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2102,6 +2354,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2111,6 +2364,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2120,6 +2374,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2129,6 +2384,7 @@
},
{
"BriefDescription": "Offcore other requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2138,6 +2394,7 @@
},
{
"BriefDescription": "Offcore other requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2147,6 +2404,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2156,6 +2414,7 @@
},
{
"BriefDescription": "All offcore prefetch data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2165,6 +2424,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2174,6 +2434,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2183,6 +2444,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2192,6 +2454,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2201,6 +2464,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2210,6 +2474,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2219,6 +2484,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2228,6 +2494,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2237,6 +2504,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2246,6 +2514,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2255,6 +2524,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2264,6 +2534,7 @@
},
{
"BriefDescription": "All offcore prefetch data reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2273,6 +2544,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2282,6 +2554,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2291,6 +2564,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2300,6 +2574,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2309,6 +2584,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2318,6 +2594,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2327,6 +2604,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2336,6 +2614,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2345,6 +2624,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2354,6 +2634,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2363,6 +2644,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2372,6 +2654,7 @@
},
{
"BriefDescription": "All offcore prefetch code reads",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2381,6 +2664,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2390,6 +2674,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2399,6 +2684,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2408,6 +2694,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2417,6 +2704,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2426,6 +2714,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2435,6 +2724,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2444,6 +2734,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2453,6 +2744,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2462,6 +2754,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2471,6 +2764,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2480,6 +2774,7 @@
},
{
"BriefDescription": "All offcore prefetch RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2489,6 +2784,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2498,6 +2794,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2507,6 +2804,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2516,6 +2814,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2525,6 +2824,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2534,6 +2834,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2543,6 +2844,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2552,6 +2854,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2561,6 +2864,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2570,6 +2874,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2579,6 +2884,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2588,6 +2894,7 @@
},
{
"BriefDescription": "All offcore prefetch requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
"MSRIndex": "0x1a6,0x1a7",
@@ -2597,6 +2904,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1a6,0x1a7",
@@ -2606,6 +2914,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2615,6 +2924,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2624,6 +2934,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2633,6 +2944,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2642,6 +2954,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2651,6 +2964,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
@@ -2660,6 +2974,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2669,6 +2984,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1a6,0x1a7",
@@ -2678,6 +2994,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -2687,6 +3004,7 @@
},
{
"BriefDescription": "Super Queue LRU hints sent to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.LRU_HINTS",
"SampleAfterValue": "2000000",
@@ -2694,6 +3012,7 @@
},
{
"BriefDescription": "Super Queue lock splits across a cache line",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "2000000",
@@ -2701,6 +3020,7 @@
},
{
"BriefDescription": "Loads delayed with at-Retirement block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.AT_RET",
"SampleAfterValue": "200000",
@@ -2708,6 +3028,7 @@
},
{
"BriefDescription": "Cacheable loads delayed with L1D block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.L1D_BLOCK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/counter.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/counter.json
new file mode 100644
index 000000000000..ecf0795dceab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "4"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
index 196ae1d9b157..9bac9313b65c 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "X87 Floating point assists (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.ALL",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.OUTPUT",
"PEBS": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "MMX Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.MMX",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "SSE2 integer Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "SSE* FP double precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "SSE and SSE2 FP Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "SSE FP packed Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "SSE FP scalar Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "SSE* FP single precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Computational floating-point operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "All Floating Point to and from MMX transitions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.ANY",
"SampleAfterValue": "2000000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Transitions from MMX to Floating Point instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_FP",
"SampleAfterValue": "2000000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Transitions from Floating Point to MMX instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_MMX",
"SampleAfterValue": "2000000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "128 bit SIMD integer pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACK",
"SampleAfterValue": "200000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "128 bit SIMD integer arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "128 bit SIMD integer logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "128 bit SIMD integer multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "128 bit SIMD integer unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.UNPACK",
"SampleAfterValue": "200000",
@@ -151,6 +172,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACK",
"SampleAfterValue": "200000",
@@ -158,6 +180,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -165,6 +188,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -172,6 +196,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit packed multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -179,6 +204,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -186,6 +212,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -193,6 +220,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.UNPACK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json
index f7f28510e3ae..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MACRO_INSTS.DECODED",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Macro-fused instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "MACRO_INSTS.FUSIONS_DECODED",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Two Uop instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "TWO_UOP_INSTS_DECODED",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json
index b65c5294bcf1..37a69ffe8521 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Offcore data reads satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -10,6 +11,7 @@
},
{
"BriefDescription": "Offcore data reads that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -19,6 +21,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -28,6 +31,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -37,6 +41,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -46,6 +51,7 @@
},
{
"BriefDescription": "Offcore code reads that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -55,6 +61,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -64,6 +71,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -73,6 +81,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -82,6 +91,7 @@
},
{
"BriefDescription": "Offcore requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -91,6 +101,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -100,6 +111,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -109,6 +121,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -118,6 +131,7 @@
},
{
"BriefDescription": "Offcore RFO requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -127,6 +141,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -136,6 +151,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -145,6 +161,7 @@
},
{
"BriefDescription": "Offcore writebacks to any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -154,6 +171,7 @@
},
{
"BriefDescription": "Offcore writebacks that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -163,6 +181,7 @@
},
{
"BriefDescription": "Offcore writebacks to the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -172,6 +191,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -181,6 +201,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -190,6 +211,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -199,6 +221,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -208,6 +231,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -217,6 +241,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -226,6 +251,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any LLC miss",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -235,6 +261,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -244,6 +271,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -253,6 +281,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -262,6 +291,7 @@
},
{
"BriefDescription": "Offcore demand data requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -271,6 +301,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -280,6 +311,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -289,6 +321,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -298,6 +331,7 @@
},
{
"BriefDescription": "Offcore demand data reads that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -307,6 +341,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -316,6 +351,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -325,6 +361,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -334,6 +371,7 @@
},
{
"BriefDescription": "Offcore demand code reads that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -343,6 +381,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -352,6 +391,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -361,6 +401,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -370,6 +411,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -379,6 +421,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -388,6 +431,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -397,6 +441,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -406,6 +451,7 @@
},
{
"BriefDescription": "Offcore other requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -415,6 +461,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -424,6 +471,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -433,6 +481,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -442,6 +491,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -451,6 +501,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -460,6 +511,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -469,6 +521,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -478,6 +531,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -487,6 +541,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -496,6 +551,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -505,6 +561,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -514,6 +571,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -523,6 +581,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -532,6 +591,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -541,6 +601,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -550,6 +611,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -559,6 +621,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -568,6 +631,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -577,6 +641,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that missed the LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -586,6 +651,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
@@ -595,6 +661,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
+ "Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
index 488274980564..bcf5bcf637c0 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ES segment renames",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "ES_REG_RENAMES",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "I/O transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "IO_TRANSACTIONS",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.CYCLES_STALLED",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.HITS",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.MISSES",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.READS",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "LARGE_ITLB.HIT",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "Loads that partially overlap an earlier store",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "LOAD_BLOCK.OVERLAP_STORE",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "All loads dispatched",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.ANY",
"SampleAfterValue": "2000000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "Loads dispatched from the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.MOB",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Loads dispatched that bypass the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS",
"SampleAfterValue": "2000000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "Loads dispatched from stage 305",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS_DELAYED",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "False dependencies due to partial address aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "PARTIAL_ADDRESS_ALIAS",
"SampleAfterValue": "200000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "All Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "SB_DRAIN.ANY",
"SampleAfterValue": "200000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "Segment rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "SEG_RENAME_STALLS",
"SampleAfterValue": "2000000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "Snoop code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.CODE",
"SampleAfterValue": "100000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "Snoop data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.DATA",
"SampleAfterValue": "100000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "Snoop invalidate requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.INVALIDATE",
"SampleAfterValue": "100000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "Outstanding snoop code requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE",
"SampleAfterValue": "2000000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "Cycles snoop code requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE_NOT_EMPTY",
@@ -142,6 +162,7 @@
},
{
"BriefDescription": "Outstanding snoop data requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA",
"SampleAfterValue": "2000000",
@@ -149,6 +170,7 @@
},
{
"BriefDescription": "Cycles snoop data requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA_NOT_EMPTY",
@@ -157,6 +179,7 @@
},
{
"BriefDescription": "Outstanding snoop invalidate requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE",
"SampleAfterValue": "2000000",
@@ -164,6 +187,7 @@
},
{
"BriefDescription": "Cycles snoop invalidate requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE_NOT_EMPTY",
@@ -172,6 +196,7 @@
},
{
"BriefDescription": "Thread responded HIT to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HIT",
"SampleAfterValue": "100000",
@@ -179,6 +204,7 @@
},
{
"BriefDescription": "Thread responded HITE to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITE",
"SampleAfterValue": "100000",
@@ -186,6 +212,7 @@
},
{
"BriefDescription": "Thread responded HITM to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITM",
"SampleAfterValue": "100000",
@@ -193,6 +220,7 @@
},
{
"BriefDescription": "Super Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xF6",
"EventName": "SQ_FULL_STALL_CYCLES",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json
index a29ed3522779..0267788d9dce 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles the divider is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Divide Operations executed",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Multiply operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.MUL",
"SampleAfterValue": "2000000",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "BACLEAR asserted with bad target address",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.BAD_TARGET",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "BACLEAR asserted, regardless of cause",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.CLEAR",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Instruction queue forced BACLEAR",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "BACLEAR_FORCE_IQ",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Early Branch Prediciton Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.EARLY",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.LATE",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "BPU_MISSED_CALL_RET",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Branch instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ANY",
"SampleAfterValue": "200000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "Conditional branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.COND",
"SampleAfterValue": "200000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT",
"SampleAfterValue": "200000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Unconditional call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "Indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "Indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "20000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "Call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NEAR_CALLS",
"SampleAfterValue": "20000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "All non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NON_CALLS",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "Indirect return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.RETURN_NEAR",
"SampleAfterValue": "20000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "Taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "Retired branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -152,6 +173,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -160,6 +182,7 @@
},
{
"BriefDescription": "Retired near call instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -168,6 +191,7 @@
},
{
"BriefDescription": "Mispredicted branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ANY",
"SampleAfterValue": "20000",
@@ -175,6 +199,7 @@
},
{
"BriefDescription": "Mispredicted conditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.COND",
"SampleAfterValue": "20000",
@@ -182,6 +207,7 @@
},
{
"BriefDescription": "Mispredicted unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT",
"SampleAfterValue": "20000",
@@ -189,6 +215,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -196,6 +223,7 @@
},
{
"BriefDescription": "Mispredicted indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -203,6 +231,7 @@
},
{
"BriefDescription": "Mispredicted indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "2000",
@@ -210,6 +239,7 @@
},
{
"BriefDescription": "Mispredicted call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NEAR_CALLS",
"SampleAfterValue": "2000",
@@ -217,6 +247,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NON_CALLS",
"SampleAfterValue": "20000",
@@ -224,6 +255,7 @@
},
{
"BriefDescription": "Mispredicted return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.RETURN_NEAR",
"SampleAfterValue": "2000",
@@ -231,6 +263,7 @@
},
{
"BriefDescription": "Mispredicted taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN",
"SampleAfterValue": "20000",
@@ -238,6 +271,7 @@
},
{
"BriefDescription": "Mispredicted retired branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -246,6 +280,7 @@
},
{
"BriefDescription": "Mispredicted conditional retired branches (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -254,6 +289,7 @@
},
{
"BriefDescription": "Mispredicted near retired calls (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -262,11 +298,13 @@
},
{
"BriefDescription": "Reference cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 3",
"EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_P",
"SampleAfterValue": "100000",
@@ -274,17 +312,20 @@
},
{
"BriefDescription": "Cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Total CPU cycles",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
@@ -293,6 +334,7 @@
},
{
"BriefDescription": "Any Instruction Length Decoder stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.ANY",
"SampleAfterValue": "2000000",
@@ -300,6 +342,7 @@
},
{
"BriefDescription": "Instruction Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
@@ -307,6 +350,7 @@
},
{
"BriefDescription": "Length Change Prefix stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000000",
@@ -314,6 +358,7 @@
},
{
"BriefDescription": "Stall cycles due to BPU MRU bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.MRU",
"SampleAfterValue": "2000000",
@@ -321,6 +366,7 @@
},
{
"BriefDescription": "Regen stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.REGEN",
"SampleAfterValue": "2000000",
@@ -328,6 +374,7 @@
},
{
"BriefDescription": "Instructions that must be decoded by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "INST_DECODED.DEC0",
"SampleAfterValue": "2000000",
@@ -335,6 +382,7 @@
},
{
"BriefDescription": "Instructions written to instruction queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "INST_QUEUE_WRITES",
"SampleAfterValue": "2000000",
@@ -342,6 +390,7 @@
},
{
"BriefDescription": "Cycles instructions are written to the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "INST_QUEUE_WRITE_CYCLES",
"SampleAfterValue": "2000000",
@@ -349,11 +398,13 @@
},
{
"BriefDescription": "Instructions retired (fixed counter)",
+ "Counter": "Fixed counter 1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Instructions retired (Programmable counter and Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -362,6 +413,7 @@
},
{
"BriefDescription": "Retired MMX instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.MMX",
"PEBS": "1",
@@ -370,6 +422,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES",
@@ -380,6 +433,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
@@ -390,6 +444,7 @@
},
{
"BriefDescription": "Retired floating-point operations (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PEBS": "1",
@@ -398,6 +453,7 @@
},
{
"BriefDescription": "Load operations conflicting with software prefetches",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE",
"SampleAfterValue": "200000",
@@ -405,6 +461,7 @@
},
{
"BriefDescription": "Cycles when uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.ACTIVE",
@@ -413,6 +470,7 @@
},
{
"BriefDescription": "Cycles no uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.INACTIVE",
@@ -422,6 +480,7 @@
},
{
"BriefDescription": "Loops that can't stream from the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "LSD_OVERFLOW",
"SampleAfterValue": "2000000",
@@ -429,6 +488,7 @@
},
{
"BriefDescription": "Cycles machine clear asserted",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "20000",
@@ -436,6 +496,7 @@
},
{
"BriefDescription": "Execution pipeline restart due to Memory ordering conflicts",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEM_ORDER",
"SampleAfterValue": "20000",
@@ -443,6 +504,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20000",
@@ -450,6 +512,7 @@
},
{
"BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -457,6 +520,7 @@
},
{
"BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.FLAGS",
"SampleAfterValue": "2000000",
@@ -464,6 +528,7 @@
},
{
"BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.REGISTERS",
"SampleAfterValue": "2000000",
@@ -471,6 +536,7 @@
},
{
"BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ROB_READ_PORT",
"SampleAfterValue": "2000000",
@@ -478,6 +544,7 @@
},
{
"BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.SCOREBOARD",
"SampleAfterValue": "2000000",
@@ -485,6 +552,7 @@
},
{
"BriefDescription": "Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -492,6 +560,7 @@
},
{
"BriefDescription": "FPU control word write stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.FPCW",
"SampleAfterValue": "2000000",
@@ -499,6 +568,7 @@
},
{
"BriefDescription": "Load buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LOAD",
"SampleAfterValue": "2000000",
@@ -506,6 +576,7 @@
},
{
"BriefDescription": "MXCSR rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.MXCSR",
"SampleAfterValue": "2000000",
@@ -513,6 +584,7 @@
},
{
"BriefDescription": "Other Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.OTHER",
"SampleAfterValue": "2000000",
@@ -520,6 +592,7 @@
},
{
"BriefDescription": "ROB full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB_FULL",
"SampleAfterValue": "2000000",
@@ -527,6 +600,7 @@
},
{
"BriefDescription": "Reservation Station full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS_FULL",
"SampleAfterValue": "2000000",
@@ -534,6 +608,7 @@
},
{
"BriefDescription": "Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.STORE",
"SampleAfterValue": "2000000",
@@ -541,6 +616,7 @@
},
{
"BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
"PEBS": "1",
@@ -549,6 +625,7 @@
},
{
"BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
"PEBS": "1",
@@ -557,6 +634,7 @@
},
{
"BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
"PEBS": "1",
@@ -565,6 +643,7 @@
},
{
"BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
"PEBS": "1",
@@ -573,6 +652,7 @@
},
{
"BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
"PEBS": "1",
@@ -581,6 +661,7 @@
},
{
"BriefDescription": "Stack pointer instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_FOLDING",
"SampleAfterValue": "2000000",
@@ -588,6 +669,7 @@
},
{
"BriefDescription": "Stack pointer sync operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_SYNC",
"SampleAfterValue": "2000000",
@@ -595,6 +677,7 @@
},
{
"BriefDescription": "Uops decoded by Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
@@ -603,6 +686,7 @@
},
{
"BriefDescription": "Cycles no Uops are decoded",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.STALL_CYCLES",
@@ -613,6 +697,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
@@ -622,6 +707,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
@@ -631,6 +717,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -642,6 +729,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -653,6 +741,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
@@ -663,6 +752,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
@@ -672,6 +762,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT0",
"SampleAfterValue": "2000000",
@@ -679,6 +770,7 @@
},
{
"BriefDescription": "Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015",
"SampleAfterValue": "2000000",
@@ -686,6 +778,7 @@
},
{
"BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
@@ -695,6 +788,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT1",
"SampleAfterValue": "2000000",
@@ -703,6 +797,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops issued on ports 2, 3 or 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT234_CORE",
"SampleAfterValue": "2000000",
@@ -711,6 +806,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 2 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT2_CORE",
"SampleAfterValue": "2000000",
@@ -719,6 +815,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 3 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT3_CORE",
"SampleAfterValue": "2000000",
@@ -727,6 +824,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 4 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT4_CORE",
"SampleAfterValue": "2000000",
@@ -734,6 +832,7 @@
},
{
"BriefDescription": "Uops executed on port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT5",
"SampleAfterValue": "2000000",
@@ -741,6 +840,7 @@
},
{
"BriefDescription": "Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000000",
@@ -749,6 +849,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops were issued on any thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -759,6 +860,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops were issued on either thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
@@ -767,6 +869,7 @@
},
{
"BriefDescription": "Fused Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.FUSED",
"SampleAfterValue": "2000000",
@@ -774,6 +877,7 @@
},
{
"BriefDescription": "Cycles no Uops were issued",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -783,6 +887,7 @@
},
{
"BriefDescription": "Cycles Uops are being retired",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
@@ -792,6 +897,7 @@
},
{
"BriefDescription": "Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
"PEBS": "1",
@@ -800,6 +906,7 @@
},
{
"BriefDescription": "Macro-fused Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -808,6 +915,7 @@
},
{
"BriefDescription": "Retirement slots used (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -816,6 +924,7 @@
},
{
"BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -826,6 +935,7 @@
},
{
"BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
@@ -836,6 +946,7 @@
},
{
"BriefDescription": "Uop unfusions due to FP exceptions",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UOP_UNFUSION",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
index 80efcfd48239..e7affdf7f41b 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DTLB load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "DTLB load miss caused by low part of address",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "DTLB second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "DTLB load miss page walks complete",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "DTLB load miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_CYCLES",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "DTLB misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "DTLB miss large page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.LARGE_WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "DTLB first level misses but second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.STLB_HIT",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "DTLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "DTLB miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Extended Page Table walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "ITLB flushes",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB_FLUSH",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "ITLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "ITLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "ITLB miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "Retired instructions that missed the ITLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "ITLB_MISS_RETIRED",
"PEBS": "1",
@@ -114,6 +130,7 @@
},
{
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
"PEBS": "1",
@@ -122,6 +139,7 @@
},
{
"BriefDescription": "Retired stores that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "MEM_STORE_RETIRED.DTLB_MISS",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/cache.json b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
index 18d61d43e4c9..9f922370ee8b 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles L1D locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Cycles L1D and L2 locked",
+ "Counter": "0,1",
"EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D_L2",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1D cache lines replaced in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_EVICT",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1D cache lines allocated in the M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_REPL",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1D snoop eviction of cache lines in M state",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.M_SNOOP_EVICT",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1 data cache lines allocated",
+ "Counter": "0,1",
"EventCode": "0x51",
"EventName": "L1D.REPL",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "L1D prefetch load lock accepted in fill buffer",
+ "Counter": "0,1",
"EventCode": "0x52",
"EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "L1D hardware prefetch misses",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.MISS",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.REQUESTS",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "L1D hardware prefetch requests triggered",
+ "Counter": "0,1",
"EventCode": "0x4E",
"EventName": "L1D_PREFETCH.TRIGGERS",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.E_STATE",
"SampleAfterValue": "100000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.I_STATE",
"SampleAfterValue": "100000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "All L1 writebacks to L2",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.MESI",
"SampleAfterValue": "100000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.M_STATE",
"SampleAfterValue": "100000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "L1 writebacks to L2 in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "L1D_WB_L2.S_STATE",
"SampleAfterValue": "100000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "All L2 data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.ANY",
"SampleAfterValue": "200000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "L2 data demand loads in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
"SampleAfterValue": "200000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "L2 data demand loads in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
"SampleAfterValue": "200000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "L2 data demand requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.MESI",
"SampleAfterValue": "200000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "L2 data demand loads in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
"SampleAfterValue": "200000",
@@ -141,6 +161,7 @@
},
{
"BriefDescription": "L2 data demand loads in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
"SampleAfterValue": "200000",
@@ -148,6 +169,7 @@
},
{
"BriefDescription": "L2 data prefetches in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
@@ -155,6 +177,7 @@
},
{
"BriefDescription": "L2 data prefetches in the I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
@@ -162,6 +185,7 @@
},
{
"BriefDescription": "All L2 data prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
"SampleAfterValue": "200000",
@@ -169,6 +193,7 @@
},
{
"BriefDescription": "L2 data prefetches in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
@@ -176,6 +201,7 @@
},
{
"BriefDescription": "L2 data prefetches in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
@@ -183,6 +209,7 @@
},
{
"BriefDescription": "L2 lines allocated",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
@@ -190,6 +217,7 @@
},
{
"BriefDescription": "L2 lines allocated in the E state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.E_STATE",
"SampleAfterValue": "100000",
@@ -197,6 +225,7 @@
},
{
"BriefDescription": "L2 lines allocated in the S state",
+ "Counter": "0,1,2,3",
"EventCode": "0xF1",
"EventName": "L2_LINES_IN.S_STATE",
"SampleAfterValue": "100000",
@@ -204,6 +233,7 @@
},
{
"BriefDescription": "L2 lines evicted",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.ANY",
"SampleAfterValue": "100000",
@@ -211,6 +241,7 @@
},
{
"BriefDescription": "L2 lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100000",
@@ -218,6 +249,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a demand request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100000",
@@ -225,6 +257,7 @@
},
{
"BriefDescription": "L2 lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
"SampleAfterValue": "100000",
@@ -232,6 +265,7 @@
},
{
"BriefDescription": "L2 modified lines evicted by a prefetch request",
+ "Counter": "0,1,2,3",
"EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
"SampleAfterValue": "100000",
@@ -239,6 +273,7 @@
},
{
"BriefDescription": "L2 instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCHES",
"SampleAfterValue": "200000",
@@ -246,6 +281,7 @@
},
{
"BriefDescription": "L2 instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_HIT",
"SampleAfterValue": "200000",
@@ -253,6 +289,7 @@
},
{
"BriefDescription": "L2 instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.IFETCH_MISS",
"SampleAfterValue": "200000",
@@ -260,6 +297,7 @@
},
{
"BriefDescription": "L2 load hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_HIT",
"SampleAfterValue": "200000",
@@ -267,6 +305,7 @@
},
{
"BriefDescription": "L2 load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LD_MISS",
"SampleAfterValue": "200000",
@@ -274,6 +313,7 @@
},
{
"BriefDescription": "L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.LOADS",
"SampleAfterValue": "200000",
@@ -281,6 +321,7 @@
},
{
"BriefDescription": "All L2 misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200000",
@@ -288,6 +329,7 @@
},
{
"BriefDescription": "All L2 prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCHES",
"SampleAfterValue": "200000",
@@ -295,6 +337,7 @@
},
{
"BriefDescription": "L2 prefetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_HIT",
"SampleAfterValue": "200000",
@@ -302,6 +345,7 @@
},
{
"BriefDescription": "L2 prefetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_MISS",
"SampleAfterValue": "200000",
@@ -309,6 +353,7 @@
},
{
"BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200000",
@@ -316,6 +361,7 @@
},
{
"BriefDescription": "L2 RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFOS",
"SampleAfterValue": "200000",
@@ -323,6 +369,7 @@
},
{
"BriefDescription": "L2 RFO hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200000",
@@ -330,6 +377,7 @@
},
{
"BriefDescription": "L2 RFO misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200000",
@@ -337,6 +385,7 @@
},
{
"BriefDescription": "All L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.ANY",
"SampleAfterValue": "200000",
@@ -344,6 +393,7 @@
},
{
"BriefDescription": "L2 fill transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.FILL",
"SampleAfterValue": "200000",
@@ -351,6 +401,7 @@
},
{
"BriefDescription": "L2 instruction fetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.IFETCH",
"SampleAfterValue": "200000",
@@ -358,6 +409,7 @@
},
{
"BriefDescription": "L1D writeback to L2 transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.L1D_WB",
"SampleAfterValue": "200000",
@@ -365,6 +417,7 @@
},
{
"BriefDescription": "L2 Load transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.LOAD",
"SampleAfterValue": "200000",
@@ -372,6 +425,7 @@
},
{
"BriefDescription": "L2 prefetch transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.PREFETCH",
"SampleAfterValue": "200000",
@@ -379,6 +433,7 @@
},
{
"BriefDescription": "L2 RFO transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.RFO",
"SampleAfterValue": "200000",
@@ -386,6 +441,7 @@
},
{
"BriefDescription": "L2 writeback to LLC transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.WB",
"SampleAfterValue": "200000",
@@ -393,6 +449,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in E state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.E_STATE",
"SampleAfterValue": "100000",
@@ -400,6 +457,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.HIT",
"SampleAfterValue": "100000",
@@ -407,6 +465,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.I_STATE",
"SampleAfterValue": "100000",
@@ -414,6 +473,7 @@
},
{
"BriefDescription": "All demand L2 lock RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.MESI",
"SampleAfterValue": "100000",
@@ -421,6 +481,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.M_STATE",
"SampleAfterValue": "100000",
@@ -428,6 +489,7 @@
},
{
"BriefDescription": "L2 demand lock RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.S_STATE",
"SampleAfterValue": "100000",
@@ -435,6 +497,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs that hit the cache",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.HIT",
"SampleAfterValue": "100000",
@@ -442,6 +505,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in I state (misses)",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.I_STATE",
"SampleAfterValue": "100000",
@@ -449,6 +513,7 @@
},
{
"BriefDescription": "All L2 demand store RFOs",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.MESI",
"SampleAfterValue": "100000",
@@ -456,6 +521,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in M state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.M_STATE",
"SampleAfterValue": "100000",
@@ -463,6 +529,7 @@
},
{
"BriefDescription": "L2 demand store RFOs in S state",
+ "Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "L2_WRITE.RFO.S_STATE",
"SampleAfterValue": "100000",
@@ -470,6 +537,7 @@
},
{
"BriefDescription": "Longest latency cache miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100000",
@@ -477,6 +545,7 @@
},
{
"BriefDescription": "Longest latency cache reference",
+ "Counter": "0,1,2,3",
"EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "200000",
@@ -484,6 +553,7 @@
},
{
"BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
"MSRIndex": "0x3F6",
@@ -493,6 +563,7 @@
},
{
"BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
"MSRIndex": "0x3F6",
@@ -503,6 +574,7 @@
},
{
"BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
"MSRIndex": "0x3F6",
@@ -513,6 +585,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
"MSRIndex": "0x3F6",
@@ -523,6 +596,7 @@
},
{
"BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
"MSRIndex": "0x3F6",
@@ -533,6 +607,7 @@
},
{
"BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
"MSRIndex": "0x3F6",
@@ -543,6 +618,7 @@
},
{
"BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
"MSRIndex": "0x3F6",
@@ -553,6 +629,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
"MSRIndex": "0x3F6",
@@ -563,6 +640,7 @@
},
{
"BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
"MSRIndex": "0x3F6",
@@ -573,6 +651,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
"MSRIndex": "0x3F6",
@@ -583,6 +662,7 @@
},
{
"BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
"MSRIndex": "0x3F6",
@@ -593,6 +673,7 @@
},
{
"BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
"MSRIndex": "0x3F6",
@@ -603,6 +684,7 @@
},
{
"BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
"MSRIndex": "0x3F6",
@@ -613,6 +695,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
"MSRIndex": "0x3F6",
@@ -623,6 +706,7 @@
},
{
"BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)",
+ "Counter": "3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
"MSRIndex": "0x3F6",
@@ -633,6 +717,7 @@
},
{
"BriefDescription": "Instructions retired which contains a load (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LOADS",
"PEBS": "1",
@@ -641,6 +726,7 @@
},
{
"BriefDescription": "Instructions retired which contains a store (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.STORES",
"PEBS": "1",
@@ -649,6 +735,7 @@
},
{
"BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.HIT_LFB",
"PEBS": "1",
@@ -657,6 +744,7 @@
},
{
"BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L1D_HIT",
"PEBS": "1",
@@ -665,6 +753,7 @@
},
{
"BriefDescription": "Retired loads that hit the L2 cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
"PEBS": "1",
@@ -673,6 +762,7 @@
},
{
"BriefDescription": "Retired loads that miss the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_MISS",
"PEBS": "1",
@@ -681,6 +771,7 @@
},
{
"BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
"PEBS": "1",
@@ -689,6 +780,7 @@
},
{
"BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
"PEBS": "1",
@@ -697,6 +789,7 @@
},
{
"BriefDescription": "Load instructions retired local dram and remote cache HIT data sources (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
"PEBS": "1",
@@ -705,6 +798,7 @@
},
{
"BriefDescription": "Load instructions retired that HIT modified data in sibling core (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.LOCAL_HITM",
"PEBS": "1",
@@ -713,6 +807,7 @@
},
{
"BriefDescription": "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.REMOTE_DRAM",
"PEBS": "1",
@@ -721,6 +816,7 @@
},
{
"BriefDescription": "Retired loads that hit remote socket in modified state (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.REMOTE_HITM",
"PEBS": "1",
@@ -729,6 +825,7 @@
},
{
"BriefDescription": "Load instructions retired IO (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "MEM_UNCORE_RETIRED.UNCACHEABLE",
"PEBS": "1",
@@ -737,6 +834,7 @@
},
{
"BriefDescription": "All offcore requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY",
"SampleAfterValue": "100000",
@@ -744,6 +842,7 @@
},
{
"BriefDescription": "Offcore read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY.READ",
"SampleAfterValue": "100000",
@@ -751,6 +850,7 @@
},
{
"BriefDescription": "Offcore RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ANY.RFO",
"SampleAfterValue": "100000",
@@ -758,6 +858,7 @@
},
{
"BriefDescription": "Offcore demand code read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.READ_CODE",
"SampleAfterValue": "100000",
@@ -765,6 +866,7 @@
},
{
"BriefDescription": "Offcore demand data read requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.READ_DATA",
"SampleAfterValue": "100000",
@@ -772,6 +874,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.DEMAND.RFO",
"SampleAfterValue": "100000",
@@ -779,6 +882,7 @@
},
{
"BriefDescription": "Offcore L1 data cache writebacks",
+ "Counter": "0,1,2,3",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
"SampleAfterValue": "100000",
@@ -786,6 +890,7 @@
},
{
"BriefDescription": "Outstanding offcore reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ",
"SampleAfterValue": "2000000",
@@ -793,6 +898,7 @@
},
{
"BriefDescription": "Cycles offcore reads busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ_NOT_EMPTY",
@@ -801,6 +907,7 @@
},
{
"BriefDescription": "Outstanding offcore demand code reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE",
"SampleAfterValue": "2000000",
@@ -808,6 +915,7 @@
},
{
"BriefDescription": "Cycles offcore demand code read busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE_NOT_EMPTY",
@@ -816,6 +924,7 @@
},
{
"BriefDescription": "Outstanding offcore demand data reads",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA",
"SampleAfterValue": "2000000",
@@ -823,6 +932,7 @@
},
{
"BriefDescription": "Cycles offcore demand data read busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA_NOT_EMPTY",
@@ -831,6 +941,7 @@
},
{
"BriefDescription": "Outstanding offcore demand RFOs",
+ "Counter": "0",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO",
"SampleAfterValue": "2000000",
@@ -838,6 +949,7 @@
},
{
"BriefDescription": "Cycles offcore demand RFOs busy",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0x60",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO_NOT_EMPTY",
@@ -846,6 +958,7 @@
},
{
"BriefDescription": "Offcore requests blocked due to Super Queue full",
+ "Counter": "0,1,2,3",
"EventCode": "0xB2",
"EventName": "OFFCORE_REQUESTS_SQ_FULL",
"SampleAfterValue": "100000",
@@ -853,6 +966,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -862,6 +976,7 @@
},
{
"BriefDescription": "All offcore data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -871,6 +986,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -880,6 +996,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -889,6 +1006,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -898,6 +1016,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -907,6 +1026,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -916,6 +1036,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -925,6 +1046,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -934,6 +1056,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -943,6 +1066,7 @@
},
{
"BriefDescription": "Offcore data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -952,6 +1076,7 @@
},
{
"BriefDescription": "Offcore data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -961,6 +1086,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -970,6 +1096,7 @@
},
{
"BriefDescription": "All offcore code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -979,6 +1106,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -988,6 +1116,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -997,6 +1126,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1006,6 +1136,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1015,6 +1146,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1024,6 +1156,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1033,6 +1166,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1042,6 +1176,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1051,6 +1186,7 @@
},
{
"BriefDescription": "Offcore code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1060,6 +1196,7 @@
},
{
"BriefDescription": "Offcore code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1069,6 +1206,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1078,6 +1216,7 @@
},
{
"BriefDescription": "All offcore requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1087,6 +1226,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1096,6 +1236,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1105,6 +1246,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1114,6 +1256,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1123,6 +1266,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1132,6 +1276,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1141,6 +1286,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1150,6 +1296,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1159,6 +1306,7 @@
},
{
"BriefDescription": "Offcore requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1168,6 +1316,7 @@
},
{
"BriefDescription": "Offcore requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1177,6 +1326,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1186,6 +1336,7 @@
},
{
"BriefDescription": "All offcore RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1195,6 +1346,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1204,6 +1356,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1213,6 +1366,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1222,6 +1376,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1231,6 +1386,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1240,6 +1396,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1249,6 +1406,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1258,6 +1416,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1267,6 +1426,7 @@
},
{
"BriefDescription": "Offcore RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1276,6 +1436,7 @@
},
{
"BriefDescription": "Offcore RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1285,6 +1446,7 @@
},
{
"BriefDescription": "Offcore writebacks to any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1294,6 +1456,7 @@
},
{
"BriefDescription": "All offcore writebacks",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1303,6 +1466,7 @@
},
{
"BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1312,6 +1476,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1321,6 +1486,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1330,6 +1496,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1339,6 +1506,7 @@
},
{
"BriefDescription": "Offcore writebacks to the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1348,6 +1516,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1357,6 +1526,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1366,6 +1536,7 @@
},
{
"BriefDescription": "Offcore writebacks that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1375,6 +1546,7 @@
},
{
"BriefDescription": "Offcore writebacks that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1384,6 +1556,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1393,6 +1566,7 @@
},
{
"BriefDescription": "All offcore code or data read requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1402,6 +1576,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1411,6 +1586,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1420,6 +1596,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1429,6 +1606,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1438,6 +1616,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1447,6 +1626,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1456,6 +1636,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1465,6 +1646,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1474,6 +1656,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1483,6 +1666,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1492,6 +1676,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any cache_dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1501,6 +1686,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any location",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1510,6 +1696,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1519,6 +1706,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1528,6 +1716,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1537,6 +1726,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1546,6 +1736,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = local cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1555,6 +1746,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = local cache or dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1564,6 +1756,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1573,6 +1766,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = remote cache or dram",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1582,6 +1776,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1591,6 +1786,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1600,6 +1796,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1609,6 +1806,7 @@
},
{
"BriefDescription": "All offcore demand data requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1618,6 +1816,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1627,6 +1826,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1636,6 +1836,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1645,6 +1846,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1654,6 +1856,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1663,6 +1866,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1672,6 +1876,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1681,6 +1886,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1690,6 +1896,7 @@
},
{
"BriefDescription": "Offcore demand data requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1699,6 +1906,7 @@
},
{
"BriefDescription": "Offcore demand data requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1708,6 +1916,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1717,6 +1926,7 @@
},
{
"BriefDescription": "All offcore demand data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1726,6 +1936,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1735,6 +1946,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1744,6 +1956,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1753,6 +1966,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1762,6 +1976,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1771,6 +1986,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1780,6 +1996,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1789,6 +2006,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1798,6 +2016,7 @@
},
{
"BriefDescription": "Offcore demand data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1807,6 +2026,7 @@
},
{
"BriefDescription": "Offcore demand data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1816,6 +2036,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1825,6 +2046,7 @@
},
{
"BriefDescription": "All offcore demand code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1834,6 +2056,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1843,6 +2066,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1852,6 +2076,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1861,6 +2086,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1870,6 +2096,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1879,6 +2106,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1888,6 +2116,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -1897,6 +2126,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1906,6 +2136,7 @@
},
{
"BriefDescription": "Offcore demand code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -1915,6 +2146,7 @@
},
{
"BriefDescription": "Offcore demand code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -1924,6 +2156,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1933,6 +2166,7 @@
},
{
"BriefDescription": "All offcore demand RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -1942,6 +2176,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -1951,6 +2186,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -1960,6 +2196,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -1969,6 +2206,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -1978,6 +2216,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -1987,6 +2226,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -1996,6 +2236,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2005,6 +2246,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2014,6 +2256,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2023,6 +2266,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2032,6 +2276,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2041,6 +2286,7 @@
},
{
"BriefDescription": "All offcore other requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2050,6 +2296,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2059,6 +2306,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2068,6 +2316,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2077,6 +2326,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2086,6 +2336,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2095,6 +2346,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2104,6 +2356,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2113,6 +2366,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2122,6 +2376,7 @@
},
{
"BriefDescription": "Offcore other requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2131,6 +2386,7 @@
},
{
"BriefDescription": "Offcore other requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2140,6 +2396,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2149,6 +2406,7 @@
},
{
"BriefDescription": "All offcore prefetch data requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2158,6 +2416,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2167,6 +2426,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2176,6 +2436,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2185,6 +2446,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2194,6 +2456,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2203,6 +2466,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2212,6 +2476,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2221,6 +2486,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2230,6 +2496,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2239,6 +2506,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2248,6 +2516,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2257,6 +2526,7 @@
},
{
"BriefDescription": "All offcore prefetch data reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2266,6 +2536,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2275,6 +2546,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2284,6 +2556,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2293,6 +2566,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2302,6 +2576,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2311,6 +2586,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2320,6 +2596,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2329,6 +2606,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2338,6 +2616,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2347,6 +2626,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2356,6 +2636,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2365,6 +2646,7 @@
},
{
"BriefDescription": "All offcore prefetch code reads",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2374,6 +2656,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2383,6 +2666,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2392,6 +2676,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2401,6 +2686,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2410,6 +2696,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2419,6 +2706,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2428,6 +2716,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2437,6 +2726,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2446,6 +2736,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2455,6 +2746,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2464,6 +2756,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2473,6 +2766,7 @@
},
{
"BriefDescription": "All offcore prefetch RFO requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2482,6 +2776,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2491,6 +2786,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2500,6 +2796,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2509,6 +2806,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2518,6 +2816,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2527,6 +2826,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2536,6 +2836,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2545,6 +2846,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2554,6 +2856,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2563,6 +2866,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2572,6 +2876,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2581,6 +2886,7 @@
},
{
"BriefDescription": "All offcore prefetch requests",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
@@ -2590,6 +2896,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
@@ -2599,6 +2906,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
@@ -2608,6 +2916,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
@@ -2617,6 +2926,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
@@ -2626,6 +2936,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
@@ -2635,6 +2946,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2644,6 +2956,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
@@ -2653,6 +2966,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
@@ -2662,6 +2976,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
@@ -2671,6 +2986,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
@@ -2680,6 +2996,7 @@
},
{
"BriefDescription": "Super Queue LRU hints sent to LLC",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.LRU_HINTS",
"SampleAfterValue": "2000000",
@@ -2687,6 +3004,7 @@
},
{
"BriefDescription": "Super Queue lock splits across a cache line",
+ "Counter": "0,1,2,3",
"EventCode": "0xF4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "2000000",
@@ -2694,6 +3012,7 @@
},
{
"BriefDescription": "Loads delayed with at-Retirement block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.AT_RET",
"SampleAfterValue": "200000",
@@ -2701,6 +3020,7 @@
},
{
"BriefDescription": "Cacheable loads delayed with L1D block code",
+ "Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "STORE_BLOCKS.L1D_BLOCK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/counter.json b/tools/perf/pmu-events/arch/x86/westmereex/counter.json
new file mode 100644
index 000000000000..ecf0795dceab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereex/counter.json
@@ -0,0 +1,7 @@
+[
+ {
+ "Unit": "core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "4"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
index 196ae1d9b157..9bac9313b65c 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "X87 Floating point assists (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.ALL",
"PEBS": "1",
@@ -9,6 +10,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
"PEBS": "1",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xF7",
"EventName": "FP_ASSIST.OUTPUT",
"PEBS": "1",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "MMX Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.MMX",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "SSE2 integer Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "SSE* FP double precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "SSE and SSE2 FP Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "SSE FP packed Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "SSE FP scalar Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "SSE* FP single precision Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Computational floating-point operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "All Floating Point to and from MMX transitions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.ANY",
"SampleAfterValue": "2000000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Transitions from MMX to Floating Point instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_FP",
"SampleAfterValue": "2000000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Transitions from Floating Point to MMX instructions",
+ "Counter": "0,1,2,3",
"EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_MMX",
"SampleAfterValue": "2000000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "128 bit SIMD integer pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACK",
"SampleAfterValue": "200000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "128 bit SIMD integer arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "128 bit SIMD integer logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "128 bit SIMD integer multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "128 bit SIMD integer shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "128 bit SIMD integer unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "SIMD_INT_128.UNPACK",
"SampleAfterValue": "200000",
@@ -151,6 +172,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit pack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACK",
"SampleAfterValue": "200000",
@@ -158,6 +180,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit arithmetic operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_ARITH",
"SampleAfterValue": "200000",
@@ -165,6 +188,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit logical operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_LOGICAL",
"SampleAfterValue": "200000",
@@ -172,6 +196,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit packed multiply operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_MPY",
"SampleAfterValue": "200000",
@@ -179,6 +204,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shift operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_SHIFT",
"SampleAfterValue": "200000",
@@ -186,6 +212,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit shuffle/move operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
@@ -193,6 +220,7 @@
},
{
"BriefDescription": "SIMD integer 64 bit unpack operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xFD",
"EventName": "SIMD_INT_64.UNPACK",
"SampleAfterValue": "200000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/frontend.json b/tools/perf/pmu-events/arch/x86/westmereex/frontend.json
index f7f28510e3ae..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/frontend.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD0",
"EventName": "MACRO_INSTS.DECODED",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Macro-fused instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xA6",
"EventName": "MACRO_INSTS.FUSIONS_DECODED",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "Two Uop instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "TWO_UOP_INSTS_DECODED",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/memory.json b/tools/perf/pmu-events/arch/x86/westmereex/memory.json
index f3c0d2d4bc6a..aaa7c43a7fec 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Misaligned store references",
+ "Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "MISALIGN_MEM_REF.STORE",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -17,6 +19,7 @@
},
{
"BriefDescription": "Offcore data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -26,6 +29,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -35,6 +39,7 @@
},
{
"BriefDescription": "Offcore data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -44,6 +49,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -53,6 +59,7 @@
},
{
"BriefDescription": "Offcore code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -62,6 +69,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -71,6 +79,7 @@
},
{
"BriefDescription": "Offcore code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -80,6 +89,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -89,6 +99,7 @@
},
{
"BriefDescription": "Offcore requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -98,6 +109,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -107,6 +119,7 @@
},
{
"BriefDescription": "Offcore requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -116,6 +129,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -125,6 +139,7 @@
},
{
"BriefDescription": "Offcore RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -134,6 +149,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -143,6 +159,7 @@
},
{
"BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -152,6 +169,7 @@
},
{
"BriefDescription": "Offcore writebacks to any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -161,6 +179,7 @@
},
{
"BriefDescription": "Offcore writebacks that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -170,6 +189,7 @@
},
{
"BriefDescription": "Offcore writebacks to the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -179,6 +199,7 @@
},
{
"BriefDescription": "Offcore writebacks to a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -188,6 +209,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -197,6 +219,7 @@
},
{
"BriefDescription": "Offcore code or data read requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -206,6 +229,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -215,6 +239,7 @@
},
{
"BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -224,6 +249,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -233,6 +259,7 @@
},
{
"BriefDescription": "Offcore request = all data, response = any LLC miss",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -242,6 +269,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -251,6 +279,7 @@
},
{
"BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -260,6 +289,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -269,6 +299,7 @@
},
{
"BriefDescription": "Offcore demand data requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -278,6 +309,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -287,6 +319,7 @@
},
{
"BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -296,6 +329,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -305,6 +339,7 @@
},
{
"BriefDescription": "Offcore demand data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -314,6 +349,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -323,6 +359,7 @@
},
{
"BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -332,6 +369,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -341,6 +379,7 @@
},
{
"BriefDescription": "Offcore demand code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -350,6 +389,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -359,6 +399,7 @@
},
{
"BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -368,6 +409,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -377,6 +419,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -386,6 +429,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -395,6 +439,7 @@
},
{
"BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -404,6 +449,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -413,6 +459,7 @@
},
{
"BriefDescription": "Offcore other requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -422,6 +469,7 @@
},
{
"BriefDescription": "Offcore other requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -431,6 +479,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -440,6 +489,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -449,6 +499,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -458,6 +509,7 @@
},
{
"BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -467,6 +519,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -476,6 +529,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -485,6 +539,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -494,6 +549,7 @@
},
{
"BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -503,6 +559,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -512,6 +569,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -521,6 +579,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -530,6 +589,7 @@
},
{
"BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -539,6 +599,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -548,6 +609,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -557,6 +619,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -566,6 +629,7 @@
},
{
"BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
@@ -575,6 +639,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
@@ -584,6 +649,7 @@
},
{
"BriefDescription": "Offcore prefetch requests that missed the LLC",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
@@ -593,6 +659,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
@@ -602,6 +669,7 @@
},
{
"BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
+ "Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/other.json b/tools/perf/pmu-events/arch/x86/westmereex/other.json
index 488274980564..bcf5bcf637c0 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/other.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "ES segment renames",
+ "Counter": "0,1,2,3",
"EventCode": "0xD5",
"EventName": "ES_REG_RENAMES",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "I/O transactions",
+ "Counter": "0,1,2,3",
"EventCode": "0x6C",
"EventName": "IO_TRANSACTIONS",
"SampleAfterValue": "2000000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.CYCLES_STALLED",
"SampleAfterValue": "2000000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.HITS",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.MISSES",
"SampleAfterValue": "2000000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "L1I.READS",
"SampleAfterValue": "2000000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x82",
"EventName": "LARGE_ITLB.HIT",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "Loads that partially overlap an earlier store",
+ "Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "LOAD_BLOCK.OVERLAP_STORE",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "All loads dispatched",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.ANY",
"SampleAfterValue": "2000000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "Loads dispatched from the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.MOB",
"SampleAfterValue": "2000000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "Loads dispatched that bypass the MOB",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS",
"SampleAfterValue": "2000000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "Loads dispatched from stage 305",
+ "Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS_DELAYED",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "False dependencies due to partial address aliasing",
+ "Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "PARTIAL_ADDRESS_ALIAS",
"SampleAfterValue": "200000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "All Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "SB_DRAIN.ANY",
"SampleAfterValue": "200000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "Segment rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD4",
"EventName": "SEG_RENAME_STALLS",
"SampleAfterValue": "2000000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "Snoop code requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.CODE",
"SampleAfterValue": "100000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "Snoop data requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.DATA",
"SampleAfterValue": "100000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "Snoop invalidate requests",
+ "Counter": "0,1,2,3",
"EventCode": "0xB4",
"EventName": "SNOOPQ_REQUESTS.INVALIDATE",
"SampleAfterValue": "100000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "Outstanding snoop code requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE",
"SampleAfterValue": "2000000",
@@ -134,6 +153,7 @@
},
{
"BriefDescription": "Cycles snoop code requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE_NOT_EMPTY",
@@ -142,6 +162,7 @@
},
{
"BriefDescription": "Outstanding snoop data requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA",
"SampleAfterValue": "2000000",
@@ -149,6 +170,7 @@
},
{
"BriefDescription": "Cycles snoop data requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA_NOT_EMPTY",
@@ -157,6 +179,7 @@
},
{
"BriefDescription": "Outstanding snoop invalidate requests",
+ "Counter": "0",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE",
"SampleAfterValue": "2000000",
@@ -164,6 +187,7 @@
},
{
"BriefDescription": "Cycles snoop invalidate requests queued",
+ "Counter": "0",
"CounterMask": "1",
"EventCode": "0xB3",
"EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE_NOT_EMPTY",
@@ -172,6 +196,7 @@
},
{
"BriefDescription": "Thread responded HIT to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HIT",
"SampleAfterValue": "100000",
@@ -179,6 +204,7 @@
},
{
"BriefDescription": "Thread responded HITE to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITE",
"SampleAfterValue": "100000",
@@ -186,6 +212,7 @@
},
{
"BriefDescription": "Thread responded HITM to snoop",
+ "Counter": "0,1,2,3",
"EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITM",
"SampleAfterValue": "100000",
@@ -193,6 +220,7 @@
},
{
"BriefDescription": "Super Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xF6",
"EventName": "SQ_FULL_STALL_CYCLES",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
index 026236558d05..e8cac8622b30 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "Cycles the divider is busy",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "Divide Operations executed",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x14",
@@ -18,6 +20,7 @@
},
{
"BriefDescription": "Multiply operations executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "ARITH.MUL",
"SampleAfterValue": "2000000",
@@ -25,6 +28,7 @@
},
{
"BriefDescription": "BACLEAR asserted with bad target address",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.BAD_TARGET",
"SampleAfterValue": "2000000",
@@ -32,6 +36,7 @@
},
{
"BriefDescription": "BACLEAR asserted, regardless of cause",
+ "Counter": "0,1,2,3",
"EventCode": "0xE6",
"EventName": "BACLEAR.CLEAR",
"SampleAfterValue": "2000000",
@@ -39,6 +44,7 @@
},
{
"BriefDescription": "Instruction queue forced BACLEAR",
+ "Counter": "0,1,2,3",
"EventCode": "0xA7",
"EventName": "BACLEAR_FORCE_IQ",
"SampleAfterValue": "2000000",
@@ -46,6 +52,7 @@
},
{
"BriefDescription": "Early Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.EARLY",
"SampleAfterValue": "2000000",
@@ -53,6 +60,7 @@
},
{
"BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
"EventCode": "0xE8",
"EventName": "BPU_CLEARS.LATE",
"SampleAfterValue": "2000000",
@@ -60,6 +68,7 @@
},
{
"BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
"EventCode": "0xE5",
"EventName": "BPU_MISSED_CALL_RET",
"SampleAfterValue": "2000000",
@@ -67,6 +76,7 @@
},
{
"BriefDescription": "Branch instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xE0",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
@@ -74,6 +84,7 @@
},
{
"BriefDescription": "Branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.ANY",
"SampleAfterValue": "200000",
@@ -81,6 +92,7 @@
},
{
"BriefDescription": "Conditional branch instructions executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.COND",
"SampleAfterValue": "200000",
@@ -88,6 +100,7 @@
},
{
"BriefDescription": "Unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT",
"SampleAfterValue": "200000",
@@ -95,6 +108,7 @@
},
{
"BriefDescription": "Unconditional call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -102,6 +116,7 @@
},
{
"BriefDescription": "Indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
@@ -109,6 +124,7 @@
},
{
"BriefDescription": "Indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "20000",
@@ -116,6 +132,7 @@
},
{
"BriefDescription": "Call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NEAR_CALLS",
"SampleAfterValue": "20000",
@@ -123,6 +140,7 @@
},
{
"BriefDescription": "All non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.NON_CALLS",
"SampleAfterValue": "200000",
@@ -130,6 +148,7 @@
},
{
"BriefDescription": "Indirect return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.RETURN_NEAR",
"SampleAfterValue": "20000",
@@ -137,6 +156,7 @@
},
{
"BriefDescription": "Taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN",
"SampleAfterValue": "200000",
@@ -144,6 +164,7 @@
},
{
"BriefDescription": "Retired branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -152,6 +173,7 @@
},
{
"BriefDescription": "Retired conditional branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -160,6 +182,7 @@
},
{
"BriefDescription": "Retired near call instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -168,6 +191,7 @@
},
{
"BriefDescription": "Mispredicted branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ANY",
"SampleAfterValue": "20000",
@@ -175,6 +199,7 @@
},
{
"BriefDescription": "Mispredicted conditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.COND",
"SampleAfterValue": "20000",
@@ -182,6 +207,7 @@
},
{
"BriefDescription": "Mispredicted unconditional branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT",
"SampleAfterValue": "20000",
@@ -189,6 +215,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -196,6 +223,7 @@
},
{
"BriefDescription": "Mispredicted indirect call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
@@ -203,6 +231,7 @@
},
{
"BriefDescription": "Mispredicted indirect non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "2000",
@@ -210,6 +239,7 @@
},
{
"BriefDescription": "Mispredicted call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NEAR_CALLS",
"SampleAfterValue": "2000",
@@ -217,6 +247,7 @@
},
{
"BriefDescription": "Mispredicted non call branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NON_CALLS",
"SampleAfterValue": "20000",
@@ -224,6 +255,7 @@
},
{
"BriefDescription": "Mispredicted return branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.RETURN_NEAR",
"SampleAfterValue": "2000",
@@ -231,6 +263,7 @@
},
{
"BriefDescription": "Mispredicted taken branches executed",
+ "Counter": "0,1,2,3",
"EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN",
"SampleAfterValue": "20000",
@@ -238,6 +271,7 @@
},
{
"BriefDescription": "Mispredicted retired branch instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
@@ -246,6 +280,7 @@
},
{
"BriefDescription": "Mispredicted conditional retired branches (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
"PEBS": "1",
@@ -254,6 +289,7 @@
},
{
"BriefDescription": "Mispredicted near retired calls (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
"PEBS": "1",
@@ -262,11 +298,13 @@
},
{
"BriefDescription": "Reference cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 3",
"EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_P",
"SampleAfterValue": "100000",
@@ -274,17 +312,20 @@
},
{
"BriefDescription": "Cycles when thread is not halted (fixed counter)",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Cycles when thread is not halted (programmable counter)",
+ "Counter": "0,1,2,3",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Total CPU cycles",
+ "Counter": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
@@ -293,6 +334,7 @@
},
{
"BriefDescription": "Any Instruction Length Decoder stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.ANY",
"SampleAfterValue": "2000000",
@@ -300,6 +342,7 @@
},
{
"BriefDescription": "Instruction Queue full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
@@ -307,6 +350,7 @@
},
{
"BriefDescription": "Length Change Prefix stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000000",
@@ -314,6 +358,7 @@
},
{
"BriefDescription": "Stall cycles due to BPU MRU bypass",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.MRU",
"SampleAfterValue": "2000000",
@@ -321,6 +366,7 @@
},
{
"BriefDescription": "Regen stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x87",
"EventName": "ILD_STALL.REGEN",
"SampleAfterValue": "2000000",
@@ -328,6 +374,7 @@
},
{
"BriefDescription": "Instructions that must be decoded by decoder 0",
+ "Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "INST_DECODED.DEC0",
"SampleAfterValue": "2000000",
@@ -335,6 +382,7 @@
},
{
"BriefDescription": "Instructions written to instruction queue.",
+ "Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "INST_QUEUE_WRITES",
"SampleAfterValue": "2000000",
@@ -342,6 +390,7 @@
},
{
"BriefDescription": "Cycles instructions are written to the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "INST_QUEUE_WRITE_CYCLES",
"SampleAfterValue": "2000000",
@@ -349,11 +398,13 @@
},
{
"BriefDescription": "Instructions retired (fixed counter)",
+ "Counter": "Fixed counter 1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000000"
},
{
"BriefDescription": "Instructions retired (Programmable counter and Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
@@ -362,6 +413,7 @@
},
{
"BriefDescription": "Retired MMX instructions (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.MMX",
"PEBS": "1",
@@ -370,6 +422,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES",
@@ -380,6 +433,7 @@
},
{
"BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
@@ -390,6 +444,7 @@
},
{
"BriefDescription": "Retired floating-point operations (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
"PEBS": "1",
@@ -398,6 +453,7 @@
},
{
"BriefDescription": "Load operations conflicting with software prefetches",
+ "Counter": "0,1",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE",
"SampleAfterValue": "200000",
@@ -405,6 +461,7 @@
},
{
"BriefDescription": "Cycles when uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.ACTIVE",
@@ -413,6 +470,7 @@
},
{
"BriefDescription": "Cycles no uops were delivered by the LSD",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xA8",
"EventName": "LSD.INACTIVE",
@@ -422,6 +480,7 @@
},
{
"BriefDescription": "Loops that can't stream from the instruction queue",
+ "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "LSD_OVERFLOW",
"SampleAfterValue": "2000000",
@@ -429,6 +488,7 @@
},
{
"BriefDescription": "Cycles machine clear asserted",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "20000",
@@ -436,6 +496,7 @@
},
{
"BriefDescription": "Execution pipeline restart due to Memory ordering conflicts",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEM_ORDER",
"SampleAfterValue": "20000",
@@ -443,6 +504,7 @@
},
{
"BriefDescription": "Self-Modifying Code detected",
+ "Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20000",
@@ -450,6 +512,7 @@
},
{
"BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -457,6 +520,7 @@
},
{
"BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.FLAGS",
"SampleAfterValue": "2000000",
@@ -464,6 +528,7 @@
},
{
"BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.REGISTERS",
"SampleAfterValue": "2000000",
@@ -471,6 +536,7 @@
},
{
"BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.ROB_READ_PORT",
"SampleAfterValue": "2000000",
@@ -478,6 +544,7 @@
},
{
"BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xD2",
"EventName": "RAT_STALLS.SCOREBOARD",
"SampleAfterValue": "2000000",
@@ -485,6 +552,7 @@
},
{
"BriefDescription": "Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000000",
@@ -492,6 +560,7 @@
},
{
"BriefDescription": "FPU control word write stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.FPCW",
"SampleAfterValue": "2000000",
@@ -499,6 +568,7 @@
},
{
"BriefDescription": "Load buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LOAD",
"SampleAfterValue": "2000000",
@@ -506,6 +576,7 @@
},
{
"BriefDescription": "MXCSR rename stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.MXCSR",
"SampleAfterValue": "2000000",
@@ -513,6 +584,7 @@
},
{
"BriefDescription": "Other Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.OTHER",
"SampleAfterValue": "2000000",
@@ -520,6 +592,7 @@
},
{
"BriefDescription": "ROB full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB_FULL",
"SampleAfterValue": "2000000",
@@ -527,6 +600,7 @@
},
{
"BriefDescription": "Reservation Station full stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS_FULL",
"SampleAfterValue": "2000000",
@@ -534,6 +608,7 @@
},
{
"BriefDescription": "Store buffer stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.STORE",
"SampleAfterValue": "2000000",
@@ -541,6 +616,7 @@
},
{
"BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
"PEBS": "1",
@@ -549,6 +625,7 @@
},
{
"BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
"PEBS": "1",
@@ -557,6 +634,7 @@
},
{
"BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
"PEBS": "1",
@@ -565,6 +643,7 @@
},
{
"BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
"PEBS": "1",
@@ -573,6 +652,7 @@
},
{
"BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
"PEBS": "1",
@@ -581,6 +661,7 @@
},
{
"BriefDescription": "Stack pointer instructions decoded",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_FOLDING",
"SampleAfterValue": "2000000",
@@ -588,6 +669,7 @@
},
{
"BriefDescription": "Stack pointer sync operations",
+ "Counter": "0,1,2,3",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_SYNC",
"SampleAfterValue": "2000000",
@@ -595,6 +677,7 @@
},
{
"BriefDescription": "Uops decoded by Microcode Sequencer",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
@@ -603,6 +686,7 @@
},
{
"BriefDescription": "Cycles no Uops are decoded",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xD1",
"EventName": "UOPS_DECODED.STALL_CYCLES",
@@ -613,6 +697,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
@@ -622,6 +707,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
@@ -630,6 +716,7 @@
},
{
"BriefDescription": "Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -640,6 +727,7 @@
},
{
"BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0xB1",
@@ -651,6 +739,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
@@ -661,6 +750,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
@@ -670,6 +760,7 @@
},
{
"BriefDescription": "Uops executed on port 0",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT0",
"SampleAfterValue": "2000000",
@@ -677,6 +768,7 @@
},
{
"BriefDescription": "Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015",
"SampleAfterValue": "2000000",
@@ -684,6 +776,7 @@
},
{
"BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
@@ -693,6 +786,7 @@
},
{
"BriefDescription": "Uops executed on port 1",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT1",
"SampleAfterValue": "2000000",
@@ -701,6 +795,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops issued on ports 2, 3 or 4",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT234_CORE",
"SampleAfterValue": "2000000",
@@ -709,6 +804,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 2 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT2_CORE",
"SampleAfterValue": "2000000",
@@ -717,6 +813,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 3 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT3_CORE",
"SampleAfterValue": "2000000",
@@ -725,6 +822,7 @@
{
"AnyThread": "1",
"BriefDescription": "Uops executed on port 4 (core count)",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT4_CORE",
"SampleAfterValue": "2000000",
@@ -732,6 +830,7 @@
},
{
"BriefDescription": "Uops executed on port 5",
+ "Counter": "0,1,2,3",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT5",
"SampleAfterValue": "2000000",
@@ -739,6 +838,7 @@
},
{
"BriefDescription": "Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000000",
@@ -747,6 +847,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles no Uops were issued on any thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
@@ -757,6 +858,7 @@
{
"AnyThread": "1",
"BriefDescription": "Cycles Uops were issued on either thread",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
@@ -765,6 +867,7 @@
},
{
"BriefDescription": "Fused Uops issued",
+ "Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.FUSED",
"SampleAfterValue": "2000000",
@@ -772,6 +875,7 @@
},
{
"BriefDescription": "Cycles no Uops were issued",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xE",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
@@ -781,6 +885,7 @@
},
{
"BriefDescription": "Cycles Uops are being retired",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
@@ -790,6 +895,7 @@
},
{
"BriefDescription": "Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
"PEBS": "1",
@@ -798,6 +904,7 @@
},
{
"BriefDescription": "Macro-fused Uops retired (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
"PEBS": "1",
@@ -806,6 +913,7 @@
},
{
"BriefDescription": "Retirement slots used (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
@@ -814,6 +922,7 @@
},
{
"BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
@@ -824,6 +933,7 @@
},
{
"BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "Counter": "0,1,2,3",
"CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
@@ -834,6 +944,7 @@
},
{
"BriefDescription": "Uop unfusions due to FP exceptions",
+ "Counter": "0,1,2,3",
"EventCode": "0xDB",
"EventName": "UOP_UNFUSION",
"SampleAfterValue": "2000000",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
index 6c92b2be2d06..0c3501e6e5a3 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
@@ -1,6 +1,7 @@
[
{
"BriefDescription": "DTLB load misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -8,6 +9,7 @@
},
{
"BriefDescription": "DTLB load miss large page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.LARGE_WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -15,6 +17,7 @@
},
{
"BriefDescription": "DTLB load miss caused by low part of address",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
@@ -22,6 +25,7 @@
},
{
"BriefDescription": "DTLB second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000000",
@@ -29,6 +33,7 @@
},
{
"BriefDescription": "DTLB load miss page walks complete",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -36,6 +41,7 @@
},
{
"BriefDescription": "DTLB load miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_CYCLES",
"SampleAfterValue": "200000",
@@ -43,6 +49,7 @@
},
{
"BriefDescription": "DTLB misses",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -50,6 +57,7 @@
},
{
"BriefDescription": "DTLB miss large page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.LARGE_WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -57,6 +65,7 @@
},
{
"BriefDescription": "DTLB misses caused by low part of address. Count also includes 2M page references because 2M pages do not use the PDE.",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
@@ -64,6 +73,7 @@
},
{
"BriefDescription": "DTLB first level misses but second level hit",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.STLB_HIT",
"SampleAfterValue": "200000",
@@ -71,6 +81,7 @@
},
{
"BriefDescription": "DTLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -78,6 +89,7 @@
},
{
"BriefDescription": "DTLB miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -85,6 +97,7 @@
},
{
"BriefDescription": "Extended Page Table walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -92,6 +105,7 @@
},
{
"BriefDescription": "ITLB flushes",
+ "Counter": "0,1,2,3",
"EventCode": "0xAE",
"EventName": "ITLB_FLUSH",
"SampleAfterValue": "2000000",
@@ -99,6 +113,7 @@
},
{
"BriefDescription": "ITLB miss",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.ANY",
"SampleAfterValue": "200000",
@@ -106,6 +121,7 @@
},
{
"BriefDescription": "ITLB miss large page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.LARGE_WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -113,6 +129,7 @@
},
{
"BriefDescription": "ITLB miss page walks",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
@@ -120,6 +137,7 @@
},
{
"BriefDescription": "ITLB miss page walk cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_CYCLES",
"SampleAfterValue": "2000000",
@@ -127,6 +145,7 @@
},
{
"BriefDescription": "Retired instructions that missed the ITLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC8",
"EventName": "ITLB_MISS_RETIRED",
"PEBS": "1",
@@ -135,6 +154,7 @@
},
{
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
"PEBS": "1",
@@ -143,6 +163,7 @@
},
{
"BriefDescription": "Retired stores that miss the DTLB (Precise Event)",
+ "Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "MEM_STORE_RETIRED.DTLB_MISS",
"PEBS": "1",
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index e42efc16723e..ac9b7ca41856 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -284,6 +284,7 @@ class JsonEvent:
'hisi_sccl,hha': 'hisi_sccl,hha',
'hisi_sccl,l3c': 'hisi_sccl,l3c',
'imx8_ddr': 'imx8_ddr',
+ 'imx9_ddr': 'imx9_ddr',
'L3PMC': 'amd_l3',
'DFPMC': 'amd_df',
'UMCPMC': 'amd_umc',
diff --git a/tools/perf/scripts/Build b/tools/perf/scripts/Build
index 7d8e2e57faac..46f0c6f76dbf 100644
--- a/tools/perf/scripts/Build
+++ b/tools/perf/scripts/Build
@@ -1,4 +1,4 @@
ifeq ($(CONFIG_LIBTRACEEVENT),y)
- perf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
+ perf-util-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
endif
-perf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
+perf-util-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index cc76be005d5e..9b0e5a8b5070 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -1,4 +1,4 @@
-perf-y += Context.o
+perf-util-y += Context.o
CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
index 5b0b5ff7e14a..be3710c61320 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -1,4 +1,4 @@
-perf-y += Context.o
+perf-util-y += Context.o
# -Wno-declaration-after-statement: The python headers have mixed code with declarations (decls after asserts, for instance)
CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-declaration-after-statement
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 00552eeb7178..30c4bccee5b2 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -293,7 +293,8 @@ def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, location, protocol, reason)
all_event_list.append(event_info)
-def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
+def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
+ skbaddr, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
diff --git a/tools/perf/scripts/python/parallel-perf.py b/tools/perf/scripts/python/parallel-perf.py
index 21f32ec5ed46..be85fd7f6632 100755
--- a/tools/perf/scripts/python/parallel-perf.py
+++ b/tools/perf/scripts/python/parallel-perf.py
@@ -439,7 +439,8 @@ def ProcessCommandOutputLines(cmd, per_cpu, fn, *x):
pat = re.compile(r"\s*\[[0-9]+\]")
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
while True:
- if line := p.stdout.readline():
+ line = p.stdout.readline()
+ if line:
line = line.decode("utf-8")
if pat.match(line):
line = line.split()
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index c7f9d9676095..5671ee530019 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -1,82 +1,82 @@
# SPDX-License-Identifier: GPL-2.0
-perf-y += builtin-test.o
-perf-y += tests-scripts.o
-perf-y += parse-events.o
-perf-y += dso-data.o
-perf-y += attr.o
-perf-y += vmlinux-kallsyms.o
-perf-$(CONFIG_LIBTRACEEVENT) += openat-syscall.o
-perf-$(CONFIG_LIBTRACEEVENT) += openat-syscall-all-cpus.o
-perf-$(CONFIG_LIBTRACEEVENT) += openat-syscall-tp-fields.o
-perf-$(CONFIG_LIBTRACEEVENT) += mmap-basic.o
-perf-y += perf-record.o
-perf-y += evsel-roundtrip-name.o
-perf-$(CONFIG_LIBTRACEEVENT) += evsel-tp-sched.o
-perf-y += fdarray.o
-perf-y += pmu.o
-perf-y += pmu-events.o
-perf-y += hists_common.o
-perf-y += hists_link.o
-perf-y += hists_filter.o
-perf-y += hists_output.o
-perf-y += hists_cumulate.o
-perf-y += python-use.o
-perf-y += bp_signal.o
-perf-y += bp_signal_overflow.o
-perf-y += bp_account.o
-perf-y += wp.o
-perf-y += task-exit.o
-perf-y += sw-clock.o
-perf-y += mmap-thread-lookup.o
-perf-y += thread-maps-share.o
-perf-$(CONFIG_LIBTRACEEVENT) += switch-tracking.o
-perf-y += keep-tracking.o
-perf-y += code-reading.o
-perf-y += sample-parsing.o
-perf-y += parse-no-sample-id-all.o
-perf-y += kmod-path.o
-perf-y += thread-map.o
-perf-y += topology.o
-perf-y += mem.o
-perf-y += cpumap.o
-perf-y += stat.o
-perf-y += event_update.o
-perf-y += event-times.o
-perf-y += expr.o
-perf-y += backward-ring-buffer.o
-perf-y += sdt.o
-perf-y += is_printable_array.o
-perf-y += bitmap.o
-perf-y += perf-hooks.o
-perf-y += unit_number__scnprintf.o
-perf-y += mem2node.o
-perf-y += maps.o
-perf-y += time-utils-test.o
-perf-y += genelf.o
-perf-y += api-io.o
-perf-y += demangle-java-test.o
-perf-y += demangle-ocaml-test.o
-perf-y += pfm.o
-perf-y += parse-metric.o
-perf-y += pe-file-parsing.o
-perf-y += expand-cgroup.o
-perf-y += perf-time-to-tsc.o
-perf-y += dlfilter-test.o
-perf-y += sigtrap.o
-perf-y += event_groups.o
-perf-y += symbols.o
-perf-y += util.o
+perf-test-y += builtin-test.o
+perf-test-y += tests-scripts.o
+perf-test-y += parse-events.o
+perf-test-y += dso-data.o
+perf-test-y += attr.o
+perf-test-y += vmlinux-kallsyms.o
+perf-test-$(CONFIG_LIBTRACEEVENT) += openat-syscall.o
+perf-test-$(CONFIG_LIBTRACEEVENT) += openat-syscall-all-cpus.o
+perf-test-$(CONFIG_LIBTRACEEVENT) += openat-syscall-tp-fields.o
+perf-test-$(CONFIG_LIBTRACEEVENT) += mmap-basic.o
+perf-test-y += perf-record.o
+perf-test-y += evsel-roundtrip-name.o
+perf-test-$(CONFIG_LIBTRACEEVENT) += evsel-tp-sched.o
+perf-test-y += fdarray.o
+perf-test-y += pmu.o
+perf-test-y += pmu-events.o
+perf-test-y += hists_common.o
+perf-test-y += hists_link.o
+perf-test-y += hists_filter.o
+perf-test-y += hists_output.o
+perf-test-y += hists_cumulate.o
+perf-test-y += python-use.o
+perf-test-y += bp_signal.o
+perf-test-y += bp_signal_overflow.o
+perf-test-y += bp_account.o
+perf-test-y += wp.o
+perf-test-y += task-exit.o
+perf-test-y += sw-clock.o
+perf-test-y += mmap-thread-lookup.o
+perf-test-y += thread-maps-share.o
+perf-test-$(CONFIG_LIBTRACEEVENT) += switch-tracking.o
+perf-test-y += keep-tracking.o
+perf-test-y += code-reading.o
+perf-test-y += sample-parsing.o
+perf-test-y += parse-no-sample-id-all.o
+perf-test-y += kmod-path.o
+perf-test-y += thread-map.o
+perf-test-y += topology.o
+perf-test-y += mem.o
+perf-test-y += cpumap.o
+perf-test-y += stat.o
+perf-test-y += event_update.o
+perf-test-y += event-times.o
+perf-test-y += expr.o
+perf-test-y += backward-ring-buffer.o
+perf-test-y += sdt.o
+perf-test-y += is_printable_array.o
+perf-test-y += bitmap.o
+perf-test-y += perf-hooks.o
+perf-test-y += unit_number__scnprintf.o
+perf-test-y += mem2node.o
+perf-test-y += maps.o
+perf-test-y += time-utils-test.o
+perf-test-y += genelf.o
+perf-test-y += api-io.o
+perf-test-y += demangle-java-test.o
+perf-test-y += demangle-ocaml-test.o
+perf-test-y += pfm.o
+perf-test-y += parse-metric.o
+perf-test-y += pe-file-parsing.o
+perf-test-y += expand-cgroup.o
+perf-test-y += perf-time-to-tsc.o
+perf-test-y += dlfilter-test.o
+perf-test-y += sigtrap.o
+perf-test-y += event_groups.o
+perf-test-y += symbols.o
+perf-test-y += util.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
-perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-test-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
CFLAGS_attr.o += -DBINDIR="BUILD_STR($(bindir_SQ))" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
CFLAGS_python-use.o += -DPYTHONPATH="BUILD_STR($(OUTPUT)python)" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
CFLAGS_dwarf-unwind.o += -fno-optimize-sibling-calls
-perf-y += workloads/
+perf-test-y += workloads/
ifdef SHELLCHECK
SHELL_TESTS := $(shell find tests/shell -executable -type f -name '*.sh')
@@ -90,4 +90,4 @@ $(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
$(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
-perf-y += $(TEST_LOGS)
+perf-test-y += $(TEST_LOGS)
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 06cc0e46cb28..40132655ccd1 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -3,6 +3,7 @@
#include "evsel.h"
#include "parse-events.h"
#include "pmu.h"
+#include "pmus.h"
#include "tests.h"
#include "debug.h"
#include "fncache.h"
@@ -259,26 +260,42 @@ err_out:
static bool permitted_event_name(const char *name)
{
bool has_lower = false, has_upper = false;
+ __u64 config;
for (size_t i = 0; i < strlen(name); i++) {
char c = name[i];
if (islower(c)) {
if (has_upper)
- return false;
+ goto check_legacy;
has_lower = true;
continue;
}
if (isupper(c)) {
if (has_lower)
- return false;
+ goto check_legacy;
has_upper = true;
continue;
}
if (!isdigit(c) && c != '.' && c != '_' && c != '-')
- return false;
+ goto check_legacy;
}
return true;
+check_legacy:
+ /*
+ * If the event name matches a legacy cache name the legacy encoding
+ * will still be used. This isn't quite WAI as sysfs events should take
+ * priority, but this case happens on PowerPC and matches the behavior
+ * in older perf tools where legacy events were the priority. Be
+ * permissive and assume later PMU drivers will use all lower or upper
+ * case names.
+ */
+ if (parse_events__decode_legacy_cache(name, /*extended_pmu_type=*/0, &config) == 0) {
+ pr_warning("sysfs event '%s' should be all lower/upper case, it will be matched using legacy encoding.",
+ name);
+ return true;
+ }
+ return false;
}
static int test__pmu_event_names(struct test_suite *test __maybe_unused,
@@ -340,10 +357,186 @@ static int test__pmu_event_names(struct test_suite *test __maybe_unused,
return ret;
}
+static const char * const uncore_chas[] = {
+ "uncore_cha_0",
+ "uncore_cha_1",
+ "uncore_cha_2",
+ "uncore_cha_3",
+ "uncore_cha_4",
+ "uncore_cha_5",
+ "uncore_cha_6",
+ "uncore_cha_7",
+ "uncore_cha_8",
+ "uncore_cha_9",
+ "uncore_cha_10",
+ "uncore_cha_11",
+ "uncore_cha_12",
+ "uncore_cha_13",
+ "uncore_cha_14",
+ "uncore_cha_15",
+ "uncore_cha_16",
+ "uncore_cha_17",
+ "uncore_cha_18",
+ "uncore_cha_19",
+ "uncore_cha_20",
+ "uncore_cha_21",
+ "uncore_cha_22",
+ "uncore_cha_23",
+ "uncore_cha_24",
+ "uncore_cha_25",
+ "uncore_cha_26",
+ "uncore_cha_27",
+ "uncore_cha_28",
+ "uncore_cha_29",
+ "uncore_cha_30",
+ "uncore_cha_31",
+};
+
+static const char * const mrvl_ddrs[] = {
+ "mrvl_ddr_pmu_87e1b0000000",
+ "mrvl_ddr_pmu_87e1b1000000",
+ "mrvl_ddr_pmu_87e1b2000000",
+ "mrvl_ddr_pmu_87e1b3000000",
+ "mrvl_ddr_pmu_87e1b4000000",
+ "mrvl_ddr_pmu_87e1b5000000",
+ "mrvl_ddr_pmu_87e1b6000000",
+ "mrvl_ddr_pmu_87e1b7000000",
+ "mrvl_ddr_pmu_87e1b8000000",
+ "mrvl_ddr_pmu_87e1b9000000",
+ "mrvl_ddr_pmu_87e1ba000000",
+ "mrvl_ddr_pmu_87e1bb000000",
+ "mrvl_ddr_pmu_87e1bc000000",
+ "mrvl_ddr_pmu_87e1bd000000",
+ "mrvl_ddr_pmu_87e1be000000",
+ "mrvl_ddr_pmu_87e1bf000000",
+};
+
+static int test__name_len(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("cpu", pmu_name_len_no_suffix("cpu") == strlen("cpu"));
+ TEST_ASSERT_VAL("i915", pmu_name_len_no_suffix("i915") == strlen("i915"));
+ TEST_ASSERT_VAL("cpum_cf", pmu_name_len_no_suffix("cpum_cf") == strlen("cpum_cf"));
+ for (size_t i = 0; i < ARRAY_SIZE(uncore_chas); i++) {
+ TEST_ASSERT_VAL("Strips uncore_cha suffix",
+ pmu_name_len_no_suffix(uncore_chas[i]) ==
+ strlen("uncore_cha"));
+ }
+ for (size_t i = 0; i < ARRAY_SIZE(mrvl_ddrs); i++) {
+ TEST_ASSERT_VAL("Strips mrvl_ddr_pmu suffix",
+ pmu_name_len_no_suffix(mrvl_ddrs[i]) ==
+ strlen("mrvl_ddr_pmu"));
+ }
+ return TEST_OK;
+}
+
+static int test__name_cmp(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ TEST_ASSERT_EQUAL("cpu", pmu_name_cmp("cpu", "cpu"), 0);
+ TEST_ASSERT_EQUAL("i915", pmu_name_cmp("i915", "i915"), 0);
+ TEST_ASSERT_EQUAL("cpum_cf", pmu_name_cmp("cpum_cf", "cpum_cf"), 0);
+ TEST_ASSERT_VAL("i915", pmu_name_cmp("cpu", "i915") < 0);
+ TEST_ASSERT_VAL("i915", pmu_name_cmp("i915", "cpu") > 0);
+ TEST_ASSERT_VAL("cpum_cf", pmu_name_cmp("cpum_cf", "cpum_ce") > 0);
+ TEST_ASSERT_VAL("cpum_cf", pmu_name_cmp("cpum_cf", "cpum_d0") < 0);
+ for (size_t i = 1; i < ARRAY_SIZE(uncore_chas); i++) {
+ TEST_ASSERT_VAL("uncore_cha suffixes ordered lt",
+ pmu_name_cmp(uncore_chas[i-1], uncore_chas[i]) < 0);
+ TEST_ASSERT_VAL("uncore_cha suffixes ordered gt",
+ pmu_name_cmp(uncore_chas[i], uncore_chas[i-1]) > 0);
+ }
+ for (size_t i = 1; i < ARRAY_SIZE(mrvl_ddrs); i++) {
+ TEST_ASSERT_VAL("mrvl_ddr_pmu suffixes ordered lt",
+ pmu_name_cmp(mrvl_ddrs[i-1], mrvl_ddrs[i]) < 0);
+ TEST_ASSERT_VAL("mrvl_ddr_pmu suffixes ordered gt",
+ pmu_name_cmp(mrvl_ddrs[i], mrvl_ddrs[i-1]) > 0);
+ }
+ return TEST_OK;
+}
+
+/**
+ * Test perf_pmu__match() that's used to search for a PMU given a name passed
+ * on the command line. The name that's passed may also be a filename type glob
+ * match.
+ */
+static int test__pmu_match(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_pmu test_pmu;
+
+ test_pmu.name = "pmuname";
+ TEST_ASSERT_EQUAL("Exact match", perf_pmu__match(&test_pmu, "pmuname"), true);
+ TEST_ASSERT_EQUAL("Longer token", perf_pmu__match(&test_pmu, "longertoken"), false);
+ TEST_ASSERT_EQUAL("Shorter token", perf_pmu__match(&test_pmu, "pmu"), false);
+
+ test_pmu.name = "pmuname_10";
+ TEST_ASSERT_EQUAL("Diff suffix_", perf_pmu__match(&test_pmu, "pmuname_2"), false);
+ TEST_ASSERT_EQUAL("Sub suffix_", perf_pmu__match(&test_pmu, "pmuname_1"), true);
+ TEST_ASSERT_EQUAL("Same suffix_", perf_pmu__match(&test_pmu, "pmuname_10"), true);
+ TEST_ASSERT_EQUAL("No suffix_", perf_pmu__match(&test_pmu, "pmuname"), true);
+ TEST_ASSERT_EQUAL("Underscore_", perf_pmu__match(&test_pmu, "pmuname_"), true);
+ TEST_ASSERT_EQUAL("Substring_", perf_pmu__match(&test_pmu, "pmuna"), false);
+
+ test_pmu.name = "pmuname_ab23";
+ TEST_ASSERT_EQUAL("Diff suffix hex_", perf_pmu__match(&test_pmu, "pmuname_2"), false);
+ TEST_ASSERT_EQUAL("Sub suffix hex_", perf_pmu__match(&test_pmu, "pmuname_ab"), true);
+ TEST_ASSERT_EQUAL("Same suffix hex_", perf_pmu__match(&test_pmu, "pmuname_ab23"), true);
+ TEST_ASSERT_EQUAL("No suffix hex_", perf_pmu__match(&test_pmu, "pmuname"), true);
+ TEST_ASSERT_EQUAL("Underscore hex_", perf_pmu__match(&test_pmu, "pmuname_"), true);
+ TEST_ASSERT_EQUAL("Substring hex_", perf_pmu__match(&test_pmu, "pmuna"), false);
+
+ test_pmu.name = "pmuname10";
+ TEST_ASSERT_EQUAL("Diff suffix", perf_pmu__match(&test_pmu, "pmuname2"), false);
+ TEST_ASSERT_EQUAL("Sub suffix", perf_pmu__match(&test_pmu, "pmuname1"), true);
+ TEST_ASSERT_EQUAL("Same suffix", perf_pmu__match(&test_pmu, "pmuname10"), true);
+ TEST_ASSERT_EQUAL("No suffix", perf_pmu__match(&test_pmu, "pmuname"), true);
+ TEST_ASSERT_EQUAL("Underscore", perf_pmu__match(&test_pmu, "pmuname_"), false);
+ TEST_ASSERT_EQUAL("Substring", perf_pmu__match(&test_pmu, "pmuna"), false);
+
+ test_pmu.name = "pmunameab23";
+ TEST_ASSERT_EQUAL("Diff suffix hex", perf_pmu__match(&test_pmu, "pmuname2"), false);
+ TEST_ASSERT_EQUAL("Sub suffix hex", perf_pmu__match(&test_pmu, "pmunameab"), true);
+ TEST_ASSERT_EQUAL("Same suffix hex", perf_pmu__match(&test_pmu, "pmunameab23"), true);
+ TEST_ASSERT_EQUAL("No suffix hex", perf_pmu__match(&test_pmu, "pmuname"), true);
+ TEST_ASSERT_EQUAL("Underscore hex", perf_pmu__match(&test_pmu, "pmuname_"), false);
+ TEST_ASSERT_EQUAL("Substring hex", perf_pmu__match(&test_pmu, "pmuna"), false);
+
+ /*
+ * 2 hex chars or less are not considered suffixes so it shouldn't be
+ * possible to wildcard by skipping the suffix. Therefore there are more
+ * false results here than above.
+ */
+ test_pmu.name = "pmuname_a3";
+ TEST_ASSERT_EQUAL("Diff suffix 2 hex_", perf_pmu__match(&test_pmu, "pmuname_2"), false);
+ /*
+ * This one should be false, but because pmuname_a3 ends in 3 which is
+ * decimal, it's not possible to determine if it's a short hex suffix or
+ * a normal decimal suffix following text. And we want to match on any
+ * length of decimal suffix. Run the test anyway and expect the wrong
+ * result. And slightly fuzzy matching shouldn't do too much harm.
+ */
+ TEST_ASSERT_EQUAL("Sub suffix 2 hex_", perf_pmu__match(&test_pmu, "pmuname_a"), true);
+ TEST_ASSERT_EQUAL("Same suffix 2 hex_", perf_pmu__match(&test_pmu, "pmuname_a3"), true);
+ TEST_ASSERT_EQUAL("No suffix 2 hex_", perf_pmu__match(&test_pmu, "pmuname"), false);
+ TEST_ASSERT_EQUAL("Underscore 2 hex_", perf_pmu__match(&test_pmu, "pmuname_"), false);
+ TEST_ASSERT_EQUAL("Substring 2 hex_", perf_pmu__match(&test_pmu, "pmuna"), false);
+
+ test_pmu.name = "pmuname_5";
+ TEST_ASSERT_EQUAL("Glob 1", perf_pmu__match(&test_pmu, "pmu*"), true);
+ TEST_ASSERT_EQUAL("Glob 2", perf_pmu__match(&test_pmu, "nomatch*"), false);
+ TEST_ASSERT_EQUAL("Seq 1", perf_pmu__match(&test_pmu, "pmuname_[12345]"), true);
+ TEST_ASSERT_EQUAL("Seq 2", perf_pmu__match(&test_pmu, "pmuname_[67890]"), false);
+ TEST_ASSERT_EQUAL("? 1", perf_pmu__match(&test_pmu, "pmuname_?"), true);
+ TEST_ASSERT_EQUAL("? 2", perf_pmu__match(&test_pmu, "pmuname_1?"), false);
+
+ return TEST_OK;
+}
+
static struct test_case tests__pmu[] = {
TEST_CASE("Parsing with PMU format directory", pmu_format),
TEST_CASE("Parsing with PMU event", pmu_events),
TEST_CASE("PMU event names", pmu_event_names),
+ TEST_CASE("PMU name combining", name_len),
+ TEST_CASE("PMU name comparison", name_cmp),
+ TEST_CASE("PMU cmdline match", pmu_match),
{ .name = NULL, }
};
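
The name_len and name_cmp tests above pin down the suffix rule used when combining and ordering PMU instances: a suffix is an underscore followed by decimal digits or by at least three lowercase hex characters, so "uncore_cha_12" and "mrvl_ddr_pmu_87e1b0000000" fold down to "uncore_cha" and "mrvl_ddr_pmu", while "cpum_cf" and "i915" are left untouched. A rough Python rendering of that rule, included only to illustrate the expected results; the real helpers are the C functions exercised by the test, not this sketch:

    import re

    # "_<decimal digits>" or "_<three or more lowercase hex chars>" at the end
    # of the name counts as an instance suffix.
    SUFFIX = re.compile(r"_([0-9]+|[0-9a-f]{3,})$")

    def pmu_name_len_no_suffix(name):
        """Length of the PMU name with any instance suffix stripped."""
        m = SUFFIX.search(name)
        return m.start() if m else len(name)

    assert pmu_name_len_no_suffix("uncore_cha_12") == len("uncore_cha")
    assert pmu_name_len_no_suffix("mrvl_ddr_pmu_87e1b0000000") == len("mrvl_ddr_pmu")
    assert pmu_name_len_no_suffix("cpum_cf") == len("cpum_cf")   # "cf" is too short for a hex suffix
    assert pmu_name_len_no_suffix("i915") == len("i915")         # digits without an underscore are kept
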
diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh
index 1db1e8113d99..b072d9b97387 100755
--- a/tools/perf/tests/shell/annotate.sh
+++ b/tools/perf/tests/shell/annotate.sh
@@ -15,12 +15,13 @@ skip_test_missing_symbol ${testsym}
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+perfout=$(mktemp /tmp/__perf_test.perf.out.XXXXX)
testprog="perf test -w noploop"
# disassembly format: "percent : offset: instruction (operands ...)"
disasm_regex="[0-9]*\.[0-9]* *: *\w*: *\w*"
cleanup() {
- rm -rf "${perfdata}"
+ rm -rf "${perfdata}" "${perfout}"
rm -rf "${perfdata}".old
trap - EXIT TERM INT
@@ -41,8 +42,11 @@ test_basic() {
return
fi
+ # Generate the annotated output file
+ perf annotate -i "${perfdata}" --stdio 2> /dev/null > "${perfout}"
+
# check if it has the target symbol
- if ! perf annotate -i "${perfdata}" 2> /dev/null | grep "${testsym}"
+ if ! grep "${testsym}" "${perfout}"
then
echo "Basic annotate [Failed: missing target symbol]"
err=1
@@ -50,7 +54,7 @@ test_basic() {
fi
# check if it has the disassembly lines
- if ! perf annotate -i "${perfdata}" 2> /dev/null | grep "${disasm_regex}"
+ if ! grep "${disasm_regex}" "${perfout}"
then
echo "Basic annotate [Failed: missing disasm output from default disassembler]"
err=1
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
index 63bb8974b38e..187dc8d4b163 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -21,8 +21,18 @@
THIS_TEST_NAME=`basename $0 .sh`
TEST_RESULT=0
+# shellcheck source=lib/probe_vfs_getname.sh
+. "$(dirname "$0")/../lib/probe_vfs_getname.sh"
+
TEST_PROBE=${TEST_PROBE:-"inode_permission"}
+# set NO_DEBUGINFO to skip testcase if debuginfo is not present
+# skip_if_no_debuginfo returns 2 if debuginfo is not present
+skip_if_no_debuginfo
+if [ $? -eq 2 ]; then
+ NO_DEBUGINFO=1
+fi
+
check_kprobes_available
if [ $? -ne 0 ]; then
print_overall_skipped
@@ -67,7 +77,12 @@ PERF_EXIT_CODE=$?
../common/check_all_patterns_found.pl "\s*probe:${TEST_PROBE}(?:_\d+)?\s+\(on ${TEST_PROBE}(?:[:\+]$RE_NUMBER_HEX)?@.+\)" < $LOGS_DIR/adding_kernel_list-l.log
CHECK_EXIT_CODE=$?
-print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf probe -l"
+if [ $NO_DEBUGINFO ] ; then
+ print_testcase_skipped $NO_DEBUGINFO $NO_DEBUGINFO "Skipped due to missing debuginfo"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf probe -l"
+fi
+
(( TEST_RESULT += $? ))
@@ -208,7 +223,12 @@ PERF_EXIT_CODE=$?
../common/check_all_patterns_found.pl "probe:vfs_mknod" "probe:vfs_create" "probe:vfs_rmdir" "probe:vfs_link" "probe:vfs_write" < $LOGS_DIR/adding_kernel_adding_wildcard.err
CHECK_EXIT_CODE=$?
-print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "wildcard adding support"
+if [ $NO_DEBUGINFO ] ; then
+ print_testcase_skipped $NO_DEBUGINFO $NO_DEBUGINFO "Skipped due to missing debuginfo"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "wildcard adding support"
+fi
+
(( TEST_RESULT += $? ))
@@ -232,7 +252,12 @@ CHECK_EXIT_CODE=$?
../common/check_no_patterns_found.pl "$RE_SEGFAULT" < $LOGS_DIR/adding_kernel_nonexisting.err
(( CHECK_EXIT_CODE += $? ))
-print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing variable"
+if [ $NO_DEBUGINFO ]; then
+ print_testcase_skipped $NO_DEBUGINFO $NO_DEBUGINFO "Skipped due to missing debuginfo"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing variable"
+fi
+
(( TEST_RESULT += $? ))
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 61f8149d854e..f250b7d6f773 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -4,7 +4,7 @@
set -e
-workload="perf bench sched messaging -g 1 -l 100 -t"
+workload="perf test -w brstack"
# check whether $2 is within +/- 20% of $1
compare_number()
@@ -25,15 +25,15 @@ compare_number()
check_counts()
{
- base_cycles=$1
- bpf_cycles=$2
+ base_instructions=$1
+ bpf_instructions=$2
- if [ "$base_cycles" = "<not" ]; then
- echo "Skipping: cycles event not counted"
+ if [ "$base_instructions" = "<not" ]; then
+ echo "Skipping: instructions event not counted"
exit 2
fi
- if [ "$bpf_cycles" = "<not" ]; then
- echo "Failed: cycles not counted with --bpf-counters"
+ if [ "$bpf_instructions" = "<not" ]; then
+ echo "Failed: instructions not counted with --bpf-counters"
exit 1
fi
}
@@ -41,29 +41,29 @@ check_counts()
test_bpf_counters()
{
printf "Testing --bpf-counters "
- base_cycles=$(perf stat --no-big-num -e cycles -- $workload 2>&1 | awk '/cycles/ {print $1}')
- bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- $workload 2>&1 | awk '/cycles/ {print $1}')
- check_counts $base_cycles $bpf_cycles
- compare_number $base_cycles $bpf_cycles
+ base_instructions=$(perf stat --no-big-num -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
+ bpf_instructions=$(perf stat --no-big-num --bpf-counters -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
+ check_counts $base_instructions $bpf_instructions
+ compare_number $base_instructions $bpf_instructions
echo "[Success]"
}
test_bpf_modifier()
{
printf "Testing bpf event modifier "
- stat_output=$(perf stat --no-big-num -e cycles/name=base_cycles/,cycles/name=bpf_cycles/b -- $workload 2>&1)
- base_cycles=$(echo "$stat_output"| awk '/base_cycles/ {print $1}')
- bpf_cycles=$(echo "$stat_output"| awk '/bpf_cycles/ {print $1}')
- check_counts $base_cycles $bpf_cycles
- compare_number $base_cycles $bpf_cycles
+ stat_output=$(perf stat --no-big-num -e instructions/name=base_instructions/,instructions/name=bpf_instructions/b -- $workload 2>&1)
+ base_instructions=$(echo "$stat_output"| awk '/base_instructions/ {print $1}')
+ bpf_instructions=$(echo "$stat_output"| awk '/bpf_instructions/ {print $1}')
+ check_counts $base_instructions $bpf_instructions
+ compare_number $base_instructions $bpf_instructions
echo "[Success]"
}
# skip if --bpf-counters is not supported
-if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
+if ! perf stat -e instructions --bpf-counters true > /dev/null 2>&1; then
if [ "$1" = "-v" ]; then
echo "Skipping: --bpf-counters not supported"
- perf --no-pager stat -e cycles --bpf-counters true || true
+ perf --no-pager stat -e instructions --bpf-counters true || true
fi
exit 2
fi
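
For reference, the compare_number helper used by the checks above (only its comment is visible in this hunk) accepts the --bpf-counters value when it falls within +/- 20% of the baseline count. A trivial Python equivalent of that acceptance test, assuming exactly the semantics stated in that comment:

    def within_20_percent(base, other):
        """True when 'other' is within +/- 20% of 'base'."""
        return abs(other - base) <= 0.2 * base

    assert within_20_percent(1_000_000, 1_100_000)
    assert not within_20_percent(1_000_000, 1_300_000)
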
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index 61898e256616..9caa36130175 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -28,28 +28,21 @@ cleanup_files()
trap cleanup_files EXIT TERM INT
-# Add a 1 second delay to skip samples that are not in the leaf() function
# shellcheck disable=SC2086
-perf record -o "$PERF_DATA" --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
-PID=$!
+perf record -o "$PERF_DATA" --call-graph fp -e cycles//u --user-callchains -- $TEST_PROGRAM
-echo " + Recording (PID=$PID)..."
-sleep 2
-echo " + Stopping perf-record..."
-
-kill $PID
-wait $PID
+# Try opening the file so any immediate errors are visible in the log
+perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
-# expected perf-script output:
+# expected perf-script output if 'leaf' has been inserted correctly:
#
-# program
+# perf
# 728 leaf
# 753 parent
# 76c leafloop
-# ...
+# ... remaining stack to main() ...
-perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
-perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 | \
- awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||
- sym[1] != "parent" ||
- sym[2] != "leafloop") exit 1 }'
+# Each frame is separated by a tab, some spaces and an address
+SEP="[[:space:]]+ [[:xdigit:]]+"
+perf script -i "$PERF_DATA" -F comm,ip,sym | tr '\n' ' ' | \
+ grep -E -q "perf $SEP leaf $SEP parent $SEP leafloop"
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
index 319f36ebb9a4..82bc774a078a 100755
--- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -77,7 +77,7 @@ gcc -g -Og -flto -c ${temp_dir}/testfile-foo.c -o ${temp_dir}/testfile-foo.o
gcc -g -Og -c ${temp_dir}/testfile-main.c -o ${temp_dir}/testfile-main.o
gcc -g -Og -o ${temp_dir}/testfile ${temp_dir}/testfile-foo.o ${temp_dir}/testfile-main.o
-perf probe -x ${temp_dir}/testfile --funcs foo
+perf probe -x ${temp_dir}/testfile --funcs foo | grep "foo"
perf probe -x ${temp_dir}/testfile foo
cleanup
diff --git a/tools/perf/tests/workloads/Build b/tools/perf/tests/workloads/Build
index a1f34d5861e3..48bf0d3b0f3d 100644
--- a/tools/perf/tests/workloads/Build
+++ b/tools/perf/tests/workloads/Build
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-perf-y += noploop.o
-perf-y += thloop.o
-perf-y += leafloop.o
-perf-y += sqrtloop.o
-perf-y += brstack.o
-perf-y += datasym.o
+perf-test-y += noploop.o
+perf-test-y += thloop.o
+perf-test-y += leafloop.o
+perf-test-y += sqrtloop.o
+perf-test-y += brstack.o
+perf-test-y += datasym.o
CFLAGS_sqrtloop.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
CFLAGS_leafloop.o = -g -O0 -fno-inline -fno-omit-frame-pointer -U_FORTIFY_SOURCE
diff --git a/tools/perf/tests/workloads/leafloop.c b/tools/perf/tests/workloads/leafloop.c
index 1bf5cc97649b..f7561767e32c 100644
--- a/tools/perf/tests/workloads/leafloop.c
+++ b/tools/perf/tests/workloads/leafloop.c
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
+#include <unistd.h>
#include "../tests.h"
/* We want to check these symbols in perf script */
@@ -8,10 +10,16 @@ noinline void leaf(volatile int b);
noinline void parent(volatile int b);
static volatile int a;
+static volatile sig_atomic_t done;
+
+static void sighandler(int sig __maybe_unused)
+{
+ done = 1;
+}
noinline void leaf(volatile int b)
{
- for (;;)
+ while (!done)
a += b;
}
@@ -22,12 +30,16 @@ noinline void parent(volatile int b)
static int leafloop(int argc, const char **argv)
{
- int c = 1;
+ int sec = 1;
if (argc > 0)
- c = atoi(argv[0]);
+ sec = atoi(argv[0]);
+
+ signal(SIGINT, sighandler);
+ signal(SIGALRM, sighandler);
+ alarm(sec);
- parent(c);
+ parent(sec);
return 0;
}
diff --git a/tools/perf/ui/Build b/tools/perf/ui/Build
index 6b6d7143a37b..d2ecd9290600 100644
--- a/tools/perf/ui/Build
+++ b/tools/perf/ui/Build
@@ -1,12 +1,12 @@
-perf-y += setup.o
-perf-y += helpline.o
-perf-y += progress.o
-perf-y += util.o
-perf-y += hist.o
-perf-y += stdio/hist.o
+perf-ui-y += setup.o
+perf-ui-y += helpline.o
+perf-ui-y += progress.o
+perf-ui-y += util.o
+perf-ui-y += hist.o
+perf-ui-y += stdio/hist.o
CFLAGS_setup.o += -DLIBDIR="BUILD_STR($(LIBDIR))"
-perf-$(CONFIG_SLANG) += browser.o
-perf-$(CONFIG_SLANG) += browsers/
-perf-$(CONFIG_SLANG) += tui/
+perf-ui-$(CONFIG_SLANG) += browser.o
+perf-ui-$(CONFIG_SLANG) += browsers/
+perf-ui-$(CONFIG_SLANG) += tui/
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
index 2608b5da3167..a07489e44765 100644
--- a/tools/perf/ui/browsers/Build
+++ b/tools/perf/ui/browsers/Build
@@ -1,7 +1,7 @@
-perf-y += annotate.o
-perf-y += annotate-data.o
-perf-y += hists.o
-perf-y += map.o
-perf-y += scripts.o
-perf-y += header.o
-perf-y += res_sample.o
+perf-ui-y += annotate.o
+perf-ui-y += annotate-data.o
+perf-ui-y += hists.o
+perf-ui-y += map.o
+perf-ui-y += scripts.o
+perf-ui-y += header.o
+perf-ui-y += res_sample.o
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 93ce3d47e47e..6da24aa039eb 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -180,13 +180,14 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
GtkWidget *tab_label;
int err;
- if (dso->annotate_warned)
+ if (dso__annotate_warned(dso))
return -1;
err = symbol__annotate(ms, evsel, NULL);
if (err) {
char msg[BUFSIZ];
- dso->annotate_warned = true;
+
+ dso__set_annotate_warned(dso);
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s: %s\n", sym->name, msg);
return -1;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 685ba2a54fd8..5d1f04f66a5a 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -23,99 +23,89 @@
__ret; \
})
+static int __hpp__fmt_print(struct perf_hpp *hpp, struct hists *hists, u64 val,
+ int nr_samples, const char *fmt, int len,
+ hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
+{
+ if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) {
+ double percent = 0.0;
+ u64 total = hists__total_period(hists);
+
+ if (total)
+ percent = 100.0 * val / total;
+
+ return hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
+ }
+
+ if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
+ double avg = nr_samples ? (1.0 * val / nr_samples) : 0;
+
+ return hpp__call_print_fn(hpp, print_fn, fmt, len, avg);
+ }
+
+ return hpp__call_print_fn(hpp, print_fn, fmt, len, val);
+}
+
+struct hpp_fmt_value {
+ struct hists *hists;
+ u64 val;
+ int samples;
+};
+
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
hpp_field_fn get_field, const char *fmt, int len,
hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
- int ret;
+ int ret = 0;
struct hists *hists = he->hists;
struct evsel *evsel = hists_to_evsel(hists);
+ struct evsel *pos;
char *buf = hpp->buf;
size_t size = hpp->size;
+ int i, nr_members = 1;
+ struct hpp_fmt_value *values;
- if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) {
- double percent = 0.0;
- u64 total = hists__total_period(hists);
-
- if (total)
- percent = 100.0 * get_field(he) / total;
+ if (evsel__is_group_event(evsel))
+ nr_members = evsel->core.nr_members;
- ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
- } else if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
- double average = 0;
+ values = calloc(nr_members, sizeof(*values));
+ if (values == NULL)
+ return 0;
- if (he->stat.nr_events)
- average = 1.0 * get_field(he) / he->stat.nr_events;
+ i = 0;
+ for_each_group_evsel(pos, evsel)
+ values[i++].hists = evsel__hists(pos);
- ret = hpp__call_print_fn(hpp, print_fn, fmt, len, average);
- } else {
- ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));
- }
+ values[0].val = get_field(he);
+ values[0].samples = he->stat.nr_events;
if (evsel__is_group_event(evsel)) {
- int prev_idx, idx_delta;
struct hist_entry *pair;
- int nr_members = evsel->core.nr_members;
-
- prev_idx = evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
- u64 period = get_field(pair);
- u64 total = hists__total_period(pair->hists);
- int nr_samples = pair->stat.nr_events;
+ for (i = 0; i < nr_members; i++) {
+ if (values[i].hists != pair->hists)
+ continue;
- if (!total)
- continue;
-
- evsel = hists_to_evsel(pair->hists);
- idx_delta = evsel__group_idx(evsel) - prev_idx - 1;
-
- while (idx_delta--) {
- /*
- * zero-fill group members in the middle which
- * have no sample
- */
- if (fmtype != PERF_HPP_FMT_TYPE__RAW) {
- ret += hpp__call_print_fn(hpp, print_fn,
- fmt, len, 0.0);
- } else {
- ret += hpp__call_print_fn(hpp, print_fn,
- fmt, len, 0ULL);
- }
+ values[i].val = get_field(pair);
+ values[i].samples = pair->stat.nr_events;
+ break;
}
-
- if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) {
- ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
- 100.0 * period / total);
- } else if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
- double avg = nr_samples ? (period / nr_samples) : 0;
-
- ret += hpp__call_print_fn(hpp, print_fn, fmt,
- len, avg);
- } else {
- ret += hpp__call_print_fn(hpp, print_fn, fmt,
- len, period);
- }
-
- prev_idx = evsel__group_idx(evsel);
}
+ }
- idx_delta = nr_members - prev_idx - 1;
-
- while (idx_delta--) {
- /*
- * zero-fill group members at last which have no sample
- */
- if (fmtype != PERF_HPP_FMT_TYPE__RAW) {
- ret += hpp__call_print_fn(hpp, print_fn,
- fmt, len, 0.0);
- } else {
- ret += hpp__call_print_fn(hpp, print_fn,
- fmt, len, 0ULL);
- }
- }
+ for (i = 0; i < nr_members; i++) {
+ if (symbol_conf.skip_empty &&
+ values[i].hists->stats.nr_samples == 0)
+ continue;
+
+ ret += __hpp__fmt_print(hpp, values[i].hists, values[i].val,
+ values[i].samples, fmt, len,
+ print_fn, fmtype);
}
+ free(values);
+
/*
* Restore original buf and size as it's where caller expects
* the result will be saved.
@@ -310,8 +300,18 @@ static int hpp__width_fn(struct perf_hpp_fmt *fmt,
int len = fmt->user_len ?: fmt->len;
struct evsel *evsel = hists_to_evsel(hists);
- if (symbol_conf.event_group)
- len = max(len, evsel->core.nr_members * fmt->len);
+ if (symbol_conf.event_group) {
+ int nr = 0;
+ struct evsel *pos;
+
+ for_each_group_evsel(pos, evsel) {
+ if (!symbol_conf.skip_empty ||
+ evsel__hists(pos)->stats.nr_samples)
+ nr++;
+ }
+
+ len = max(len, nr * fmt->len);
+ }
if (len < (int)strlen(fmt->name))
len = strlen(fmt->name);
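
The __hpp__fmt() rewrite above replaces the old index-delta bookkeeping with a flat per-member table: one (hists, value, samples) slot per group member, filled first from the entry itself and then from its pair entries, and finally printed in member order with empty members skipped when symbol_conf.skip_empty is set. A schematic Python version of that control flow, purely illustrative; the names mirror the C code but the data structures are stand-ins:

    def hpp_fmt(entry, group_members, get_field, skip_empty, print_fn):
        """Gather per-member values first, then emit them in member order."""
        # One slot per group member, keyed by that member's hists object.
        values = [{"hists": m, "val": 0, "samples": 0} for m in group_members]

        # Slot 0 is the group leader, taken from the entry itself.
        values[0]["val"] = get_field(entry)
        values[0]["samples"] = entry["nr_events"]

        # Pair entries fill in the slots of the remaining members.
        for pair in entry["pairs"]:
            for slot in values:
                if slot["hists"] is pair["hists"]:
                    slot["val"] = get_field(pair)
                    slot["samples"] = pair["nr_events"]
                    break

        ret = 0
        for slot in values:
            if skip_empty and slot["hists"]["nr_samples"] == 0:
                continue                      # hide members with no samples
            ret += print_fn(slot["hists"], slot["val"], slot["samples"])
        return ret
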
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index b849caace398..9372e8904d22 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -897,8 +897,7 @@ out:
return ret;
}
-size_t events_stats__fprintf(struct events_stats *stats, FILE *fp,
- bool skip_empty)
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
int i;
size_t ret = 0;
@@ -910,7 +909,7 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp,
name = perf_event__name(i);
if (!strcmp(name, "UNKNOWN"))
continue;
- if (skip_empty && !stats->nr_events[i])
+ if (symbol_conf.skip_empty && !stats->nr_events[i])
continue;
if (i && total) {
diff --git a/tools/perf/ui/tui/Build b/tools/perf/ui/tui/Build
index f916df33a1a7..2ac058ad1a61 100644
--- a/tools/perf/ui/tui/Build
+++ b/tools/perf/ui/tui/Build
@@ -1,4 +1,4 @@
-perf-y += setup.o
-perf-y += util.o
-perf-y += helpline.o
-perf-y += progress.o
+perf-ui-y += setup.o
+perf-ui-y += util.o
+perf-ui-y += helpline.o
+perf-ui-y += progress.o
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index da64efd8718f..0f18fe81ef0b 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -1,181 +1,181 @@
include $(srctree)/tools/scripts/Makefile.include
include $(srctree)/tools/scripts/utilities.mak
-perf-y += arm64-frame-pointer-unwind-support.o
-perf-y += addr_location.o
-perf-y += annotate.o
-perf-y += block-info.o
-perf-y += block-range.o
-perf-y += build-id.o
-perf-y += cacheline.o
-perf-y += config.o
-perf-y += copyfile.o
-perf-y += ctype.o
-perf-y += db-export.o
-perf-y += disasm.o
-perf-y += env.o
-perf-y += event.o
-perf-y += evlist.o
-perf-y += sideband_evlist.o
-perf-y += evsel.o
-perf-y += evsel_fprintf.o
-perf-y += perf_event_attr_fprintf.o
-perf-y += evswitch.o
-perf-y += find_bit.o
-perf-y += get_current_dir_name.o
-perf-y += levenshtein.o
-perf-y += mmap.o
-perf-y += memswap.o
-perf-y += parse-events.o
-perf-y += print-events.o
-perf-y += tracepoint.o
-perf-y += perf_regs.o
-perf-y += perf-regs-arch/
-perf-y += path.o
-perf-y += print_binary.o
-perf-y += print_insn.o
-perf-y += rlimit.o
-perf-y += argv_split.o
-perf-y += rbtree.o
-perf-y += libstring.o
-perf-y += bitmap.o
-perf-y += hweight.o
-perf-y += smt.o
-perf-y += strbuf.o
-perf-y += string.o
-perf-y += strlist.o
-perf-y += strfilter.o
-perf-y += top.o
-perf-y += usage.o
-perf-y += dso.o
-perf-y += dsos.o
-perf-y += symbol.o
-perf-y += symbol_fprintf.o
-perf-y += map_symbol.o
-perf-y += color.o
-perf-y += color_config.o
-perf-y += metricgroup.o
-perf-y += header.o
-perf-y += callchain.o
-perf-y += values.o
-perf-y += debug.o
-perf-y += fncache.o
-perf-y += machine.o
-perf-y += map.o
-perf-y += maps.o
-perf-y += pstack.o
-perf-y += session.o
-perf-y += sample-raw.o
-perf-y += s390-sample-raw.o
-perf-y += amd-sample-raw.o
-perf-$(CONFIG_TRACE) += syscalltbl.o
-perf-y += ordered-events.o
-perf-y += namespaces.o
-perf-y += comm.o
-perf-y += thread.o
-perf-y += threads.o
-perf-y += thread_map.o
-perf-y += parse-events-flex.o
-perf-y += parse-events-bison.o
-perf-y += pmu.o
-perf-y += pmus.o
-perf-y += pmu-flex.o
-perf-y += pmu-bison.o
-perf-y += svghelper.o
-perf-$(CONFIG_LIBTRACEEVENT) += trace-event-info.o
-perf-y += trace-event-scripting.o
-perf-$(CONFIG_LIBTRACEEVENT) += trace-event.o
-perf-$(CONFIG_LIBTRACEEVENT) += trace-event-parse.o
-perf-$(CONFIG_LIBTRACEEVENT) += trace-event-read.o
-perf-y += sort.o
-perf-y += hist.o
-perf-y += util.o
-perf-y += cpumap.o
-perf-y += affinity.o
-perf-y += cputopo.o
-perf-y += cgroup.o
-perf-y += target.o
-perf-y += rblist.o
-perf-y += intlist.o
-perf-y += vdso.o
-perf-y += counts.o
-perf-y += stat.o
-perf-y += stat-shadow.o
-perf-y += stat-display.o
-perf-y += perf_api_probe.o
-perf-y += record.o
-perf-y += srcline.o
-perf-y += srccode.o
-perf-y += synthetic-events.o
-perf-y += data.o
-perf-y += tsc.o
-perf-y += cloexec.o
-perf-y += call-path.o
-perf-y += rwsem.o
-perf-y += thread-stack.o
-perf-y += spark.o
-perf-y += topdown.o
-perf-y += iostat.o
-perf-y += stream.o
-perf-$(CONFIG_AUXTRACE) += auxtrace.o
-perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
-perf-$(CONFIG_AUXTRACE) += intel-pt.o
-perf-$(CONFIG_AUXTRACE) += intel-bts.o
-perf-$(CONFIG_AUXTRACE) += arm-spe.o
-perf-$(CONFIG_AUXTRACE) += arm-spe-decoder/
-perf-$(CONFIG_AUXTRACE) += hisi-ptt.o
-perf-$(CONFIG_AUXTRACE) += hisi-ptt-decoder/
-perf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
+perf-util-y += arm64-frame-pointer-unwind-support.o
+perf-util-y += addr_location.o
+perf-util-y += annotate.o
+perf-util-y += block-info.o
+perf-util-y += block-range.o
+perf-util-y += build-id.o
+perf-util-y += cacheline.o
+perf-util-y += config.o
+perf-util-y += copyfile.o
+perf-util-y += ctype.o
+perf-util-y += db-export.o
+perf-util-y += disasm.o
+perf-util-y += env.o
+perf-util-y += event.o
+perf-util-y += evlist.o
+perf-util-y += sideband_evlist.o
+perf-util-y += evsel.o
+perf-util-y += evsel_fprintf.o
+perf-util-y += perf_event_attr_fprintf.o
+perf-util-y += evswitch.o
+perf-util-y += find_bit.o
+perf-util-y += get_current_dir_name.o
+perf-util-y += levenshtein.o
+perf-util-y += mmap.o
+perf-util-y += memswap.o
+perf-util-y += parse-events.o
+perf-util-y += print-events.o
+perf-util-y += tracepoint.o
+perf-util-y += perf_regs.o
+perf-util-y += perf-regs-arch/
+perf-util-y += path.o
+perf-util-y += print_binary.o
+perf-util-y += print_insn.o
+perf-util-y += rlimit.o
+perf-util-y += argv_split.o
+perf-util-y += rbtree.o
+perf-util-y += libstring.o
+perf-util-y += bitmap.o
+perf-util-y += hweight.o
+perf-util-y += smt.o
+perf-util-y += strbuf.o
+perf-util-y += string.o
+perf-util-y += strlist.o
+perf-util-y += strfilter.o
+perf-util-y += top.o
+perf-util-y += usage.o
+perf-util-y += dso.o
+perf-util-y += dsos.o
+perf-util-y += symbol.o
+perf-util-y += symbol_fprintf.o
+perf-util-y += map_symbol.o
+perf-util-y += color.o
+perf-util-y += color_config.o
+perf-util-y += metricgroup.o
+perf-util-y += header.o
+perf-util-y += callchain.o
+perf-util-y += values.o
+perf-util-y += debug.o
+perf-util-y += fncache.o
+perf-util-y += machine.o
+perf-util-y += map.o
+perf-util-y += maps.o
+perf-util-y += pstack.o
+perf-util-y += session.o
+perf-util-y += sample-raw.o
+perf-util-y += s390-sample-raw.o
+perf-util-y += amd-sample-raw.o
+perf-util-$(CONFIG_TRACE) += syscalltbl.o
+perf-util-y += ordered-events.o
+perf-util-y += namespaces.o
+perf-util-y += comm.o
+perf-util-y += thread.o
+perf-util-y += threads.o
+perf-util-y += thread_map.o
+perf-util-y += parse-events-flex.o
+perf-util-y += parse-events-bison.o
+perf-util-y += pmu.o
+perf-util-y += pmus.o
+perf-util-y += pmu-flex.o
+perf-util-y += pmu-bison.o
+perf-util-y += svghelper.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += trace-event-info.o
+perf-util-y += trace-event-scripting.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += trace-event.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += trace-event-parse.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += trace-event-read.o
+perf-util-y += sort.o
+perf-util-y += hist.o
+perf-util-y += util.o
+perf-util-y += cpumap.o
+perf-util-y += affinity.o
+perf-util-y += cputopo.o
+perf-util-y += cgroup.o
+perf-util-y += target.o
+perf-util-y += rblist.o
+perf-util-y += intlist.o
+perf-util-y += vdso.o
+perf-util-y += counts.o
+perf-util-y += stat.o
+perf-util-y += stat-shadow.o
+perf-util-y += stat-display.o
+perf-util-y += perf_api_probe.o
+perf-util-y += record.o
+perf-util-y += srcline.o
+perf-util-y += srccode.o
+perf-util-y += synthetic-events.o
+perf-util-y += data.o
+perf-util-y += tsc.o
+perf-util-y += cloexec.o
+perf-util-y += call-path.o
+perf-util-y += rwsem.o
+perf-util-y += thread-stack.o
+perf-util-y += spark.o
+perf-util-y += topdown.o
+perf-util-y += iostat.o
+perf-util-y += stream.o
+perf-util-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-util-$(CONFIG_AUXTRACE) += intel-pt-decoder/
+perf-util-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-util-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-util-$(CONFIG_AUXTRACE) += arm-spe.o
+perf-util-$(CONFIG_AUXTRACE) += arm-spe-decoder/
+perf-util-$(CONFIG_AUXTRACE) += hisi-ptt.o
+perf-util-$(CONFIG_AUXTRACE) += hisi-ptt-decoder/
+perf-util-$(CONFIG_AUXTRACE) += s390-cpumsf.o
ifdef CONFIG_LIBOPENCSD
-perf-$(CONFIG_AUXTRACE) += cs-etm.o
-perf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+perf-util-$(CONFIG_AUXTRACE) += cs-etm.o
+perf-util-$(CONFIG_AUXTRACE) += cs-etm-decoder/
endif
-perf-$(CONFIG_AUXTRACE) += cs-etm-base.o
-
-perf-y += parse-branch-options.o
-perf-y += dump-insn.o
-perf-y += parse-regs-options.o
-perf-y += parse-sublevel-options.o
-perf-y += term.o
-perf-y += help-unknown-cmd.o
-perf-y += dlfilter.o
-perf-y += mem-events.o
-perf-y += mem-info.o
-perf-y += vsprintf.o
-perf-y += units.o
-perf-y += time-utils.o
-perf-y += expr-flex.o
-perf-y += expr-bison.o
-perf-y += expr.o
-perf-y += branch.o
-perf-y += mem2node.o
-perf-y += clockid.o
-perf-y += list_sort.o
-perf-y += mutex.o
-perf-y += sharded_mutex.o
-
-perf-$(CONFIG_LIBBPF) += bpf_map.o
-perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
-perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
-perf-$(CONFIG_PERF_BPF_SKEL) += bpf_ftrace.o
-perf-$(CONFIG_PERF_BPF_SKEL) += bpf_off_cpu.o
-perf-$(CONFIG_PERF_BPF_SKEL) += bpf-filter.o
-perf-$(CONFIG_PERF_BPF_SKEL) += bpf-filter-flex.o
-perf-$(CONFIG_PERF_BPF_SKEL) += bpf-filter-bison.o
+perf-util-$(CONFIG_AUXTRACE) += cs-etm-base.o
+
+perf-util-y += parse-branch-options.o
+perf-util-y += dump-insn.o
+perf-util-y += parse-regs-options.o
+perf-util-y += parse-sublevel-options.o
+perf-util-y += term.o
+perf-util-y += help-unknown-cmd.o
+perf-util-y += dlfilter.o
+perf-util-y += mem-events.o
+perf-util-y += mem-info.o
+perf-util-y += vsprintf.o
+perf-util-y += units.o
+perf-util-y += time-utils.o
+perf-util-y += expr-flex.o
+perf-util-y += expr-bison.o
+perf-util-y += expr.o
+perf-util-y += branch.o
+perf-util-y += mem2node.o
+perf-util-y += clockid.o
+perf-util-y += list_sort.o
+perf-util-y += mutex.o
+perf-util-y += sharded_mutex.o
+
+perf-util-$(CONFIG_LIBBPF) += bpf_map.o
+perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
+perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
+perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_ftrace.o
+perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_off_cpu.o
+perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-filter.o
+perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-filter-flex.o
+perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-filter-bison.o
ifeq ($(CONFIG_LIBTRACEEVENT),y)
- perf-$(CONFIG_PERF_BPF_SKEL) += bpf_lock_contention.o
+ perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_lock_contention.o
endif
ifeq ($(CONFIG_LIBTRACEEVENT),y)
- perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork.o
- perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork_top.o
+ perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork.o
+ perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork_top.o
endif
-perf-$(CONFIG_LIBELF) += symbol-elf.o
-perf-$(CONFIG_LIBELF) += probe-file.o
-perf-$(CONFIG_LIBELF) += probe-event.o
+perf-util-$(CONFIG_LIBELF) += symbol-elf.o
+perf-util-$(CONFIG_LIBELF) += probe-file.o
+perf-util-$(CONFIG_LIBELF) += probe-event.o
ifdef CONFIG_LIBBPF_DYNAMIC
hashmap := 1
@@ -185,60 +185,60 @@ ifndef CONFIG_LIBBPF
endif
ifdef hashmap
-perf-y += hashmap.o
+perf-util-y += hashmap.o
endif
ifndef CONFIG_LIBELF
-perf-y += symbol-minimal.o
+perf-util-y += symbol-minimal.o
endif
ifndef CONFIG_SETNS
-perf-y += setns.o
+perf-util-y += setns.o
endif
-perf-$(CONFIG_DWARF) += probe-finder.o
-perf-$(CONFIG_DWARF) += dwarf-aux.o
-perf-$(CONFIG_DWARF) += dwarf-regs.o
-perf-$(CONFIG_DWARF) += debuginfo.o
-perf-$(CONFIG_DWARF) += annotate-data.o
+perf-util-$(CONFIG_DWARF) += probe-finder.o
+perf-util-$(CONFIG_DWARF) += dwarf-aux.o
+perf-util-$(CONFIG_DWARF) += dwarf-regs.o
+perf-util-$(CONFIG_DWARF) += debuginfo.o
+perf-util-$(CONFIG_DWARF) += annotate-data.o
-perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
-perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
-perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
+perf-util-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-util-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
+perf-util-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
ifeq ($(CONFIG_LIBTRACEEVENT),y)
- perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+ perf-util-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
endif
-perf-y += data-convert-json.o
+perf-util-y += data-convert-json.o
-perf-y += scripting-engines/
+perf-util-y += scripting-engines/
-perf-$(CONFIG_ZLIB) += zlib.o
-perf-$(CONFIG_LZMA) += lzma.o
-perf-$(CONFIG_ZSTD) += zstd.o
+perf-util-$(CONFIG_ZLIB) += zlib.o
+perf-util-$(CONFIG_LZMA) += lzma.o
+perf-util-$(CONFIG_ZSTD) += zstd.o
-perf-$(CONFIG_LIBCAP) += cap.o
+perf-util-$(CONFIG_LIBCAP) += cap.o
-perf-$(CONFIG_CXX_DEMANGLE) += demangle-cxx.o
-perf-y += demangle-ocaml.o
-perf-y += demangle-java.o
-perf-y += demangle-rust.o
+perf-util-$(CONFIG_CXX_DEMANGLE) += demangle-cxx.o
+perf-util-y += demangle-ocaml.o
+perf-util-y += demangle-java.o
+perf-util-y += demangle-rust.o
ifdef CONFIG_JITDUMP
-perf-$(CONFIG_LIBELF) += jitdump.o
-perf-$(CONFIG_LIBELF) += genelf.o
-perf-$(CONFIG_DWARF) += genelf_debug.o
+perf-util-$(CONFIG_LIBELF) += jitdump.o
+perf-util-$(CONFIG_LIBELF) += genelf.o
+perf-util-$(CONFIG_DWARF) += genelf_debug.o
endif
-perf-y += perf-hooks.o
+perf-util-y += perf-hooks.o
-perf-$(CONFIG_LIBBPF) += bpf-event.o
-perf-$(CONFIG_LIBBPF) += bpf-utils.o
+perf-util-$(CONFIG_LIBBPF) += bpf-event.o
+perf-util-$(CONFIG_LIBBPF) += bpf-utils.o
-perf-$(CONFIG_LIBPFM4) += pfm.o
+perf-util-$(CONFIG_LIBPFM4) += pfm.o
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
@@ -403,4 +403,4 @@ $(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
$(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
-perf-y += $(TEST_LOGS)
+perf-util-y += $(TEST_LOGS)
diff --git a/tools/perf/util/arm-spe-decoder/Build b/tools/perf/util/arm-spe-decoder/Build
index f8dae13fc876..960062b3cb9e 100644
--- a/tools/perf/util/arm-spe-decoder/Build
+++ b/tools/perf/util/arm-spe-decoder/Build
@@ -1 +1 @@
-perf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o arm-spe-decoder.o
+perf-util-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o arm-spe-decoder.o
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
index a454c6737563..7bf607d0f6d8 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
@@ -10,24 +10,11 @@
#include <byteswap.h>
#include <linux/bitops.h>
#include <stdarg.h>
+#include <linux/kernel.h>
+#include <asm-generic/unaligned.h>
#include "arm-spe-pkt-decoder.h"
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define le16_to_cpu bswap_16
-#define le32_to_cpu bswap_32
-#define le64_to_cpu bswap_64
-#define memcpy_le64(d, s, n) do { \
- memcpy((d), (s), (n)); \
- *(d) = le64_to_cpu(*(d)); \
-} while (0)
-#else
-#define le16_to_cpu
-#define le32_to_cpu
-#define le64_to_cpu
-#define memcpy_le64 memcpy
-#endif
-
static const char * const arm_spe_packet_name[] = {
[ARM_SPE_PAD] = "PAD",
[ARM_SPE_END] = "END",
@@ -70,9 +57,9 @@ static int arm_spe_get_payload(const unsigned char *buf, size_t len,
switch (payload_len) {
case 1: packet->payload = *(uint8_t *)buf; break;
- case 2: packet->payload = le16_to_cpu(*(uint16_t *)buf); break;
- case 4: packet->payload = le32_to_cpu(*(uint32_t *)buf); break;
- case 8: packet->payload = le64_to_cpu(*(uint64_t *)buf); break;
+ case 2: packet->payload = get_unaligned_le16(buf); break;
+ case 4: packet->payload = get_unaligned_le32(buf); break;
+ case 8: packet->payload = get_unaligned_le64(buf); break;
default: return ARM_SPE_BAD_PACKET;
}
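
The packet decoder now relies on the generic get_unaligned_le16/32/64 helpers instead of the open-coded byteswap macros. As a rough userspace illustration of what those helpers provide (a sketch with made-up names, not the kernel implementation), an unaligned little-endian read can be expressed as plain byte assembly:

	#include <stdint.h>

	/* Illustrative stand-ins for get_unaligned_le16/32/64: read a value of
	 * the given width from a possibly unaligned buffer, interpreting the
	 * bytes as little-endian regardless of host endianness. */
	static inline uint16_t rd_le16(const unsigned char *p)
	{
		return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
	}

	static inline uint32_t rd_le32(const unsigned char *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}

	static inline uint64_t rd_le64(const unsigned char *p)
	{
		return (uint64_t)rd_le32(p) | ((uint64_t)rd_le32(p + 4) << 32);
	}

Either way the decoder no longer needs the __BYTE_ORDER__ conditional, since the helpers handle host endianness themselves.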
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index b51544996046..04f98b6bb291 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -17,11 +17,11 @@
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
-#define __PERF_SAMPLE_TYPE(st, opt) { st, #st, opt }
-#define PERF_SAMPLE_TYPE(_st, opt) __PERF_SAMPLE_TYPE(PERF_SAMPLE_##_st, opt)
+#define __PERF_SAMPLE_TYPE(tt, st, opt) { tt, #st, opt }
+#define PERF_SAMPLE_TYPE(_st, opt) __PERF_SAMPLE_TYPE(PBF_TERM_##_st, PERF_SAMPLE_##_st, opt)
static const struct perf_sample_info {
- u64 type;
+ enum perf_bpf_filter_term type;
const char *name;
const char *option;
} sample_table[] = {
@@ -44,12 +44,12 @@ static const struct perf_sample_info {
PERF_SAMPLE_TYPE(DATA_PAGE_SIZE, "--data-page-size"),
};
-static const struct perf_sample_info *get_sample_info(u64 flags)
+static const struct perf_sample_info *get_sample_info(enum perf_bpf_filter_term type)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(sample_table); i++) {
- if (sample_table[i].type == flags)
+ if (sample_table[i].type == type)
return &sample_table[i];
}
return NULL;
@@ -59,9 +59,15 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
{
const struct perf_sample_info *info;
- if (evsel->core.attr.sample_type & expr->sample_flags)
+ if (expr->term >= PBF_TERM_SAMPLE_START && expr->term <= PBF_TERM_SAMPLE_END &&
+ (evsel->core.attr.sample_type & (1 << (expr->term - PBF_TERM_SAMPLE_START))))
return 0;
+ if (expr->term == PBF_TERM_UID || expr->term == PBF_TERM_GID) {
+ /* Not dependent on the sample_type as computed from a BPF helper. */
+ return 0;
+ }
+
if (expr->op == PBF_OP_GROUP_BEGIN) {
struct perf_bpf_filter_expr *group;
@@ -72,10 +78,10 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
return 0;
}
- info = get_sample_info(expr->sample_flags);
+ info = get_sample_info(expr->term);
if (info == NULL) {
- pr_err("Error: %s event does not have sample flags %lx\n",
- evsel__name(evsel), expr->sample_flags);
+ pr_err("Error: %s event does not have sample flags %d\n",
+ evsel__name(evsel), expr->term);
return -1;
}
@@ -105,7 +111,7 @@ int perf_bpf_filter__prepare(struct evsel *evsel)
struct perf_bpf_filter_entry entry = {
.op = expr->op,
.part = expr->part,
- .flags = expr->sample_flags,
+ .term = expr->term,
.value = expr->val,
};
@@ -122,7 +128,7 @@ int perf_bpf_filter__prepare(struct evsel *evsel)
struct perf_bpf_filter_entry group_entry = {
.op = group->op,
.part = group->part,
- .flags = group->sample_flags,
+ .term = group->term,
.value = group->val,
};
bpf_map_update_elem(fd, &i, &group_entry, BPF_ANY);
@@ -173,7 +179,8 @@ u64 perf_bpf_filter__lost_count(struct evsel *evsel)
return skel ? skel->bss->dropped : 0;
}
-struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flags, int part,
+struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
+ int part,
enum perf_bpf_filter_op op,
unsigned long val)
{
@@ -181,7 +188,7 @@ struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flag
expr = malloc(sizeof(*expr));
if (expr != NULL) {
- expr->sample_flags = sample_flags;
+ expr->term = term;
expr->part = part;
expr->op = op;
expr->val = val;
diff --git a/tools/perf/util/bpf-filter.h b/tools/perf/util/bpf-filter.h
index 7afd159411b8..cd6764442c16 100644
--- a/tools/perf/util/bpf-filter.h
+++ b/tools/perf/util/bpf-filter.h
@@ -11,14 +11,15 @@ struct perf_bpf_filter_expr {
struct list_head groups;
enum perf_bpf_filter_op op;
int part;
- unsigned long sample_flags;
+ enum perf_bpf_filter_term term;
unsigned long val;
};
struct evsel;
#ifdef HAVE_BPF_SKEL
-struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flags, int part,
+struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
+ int part,
enum perf_bpf_filter_op op,
unsigned long val);
int perf_bpf_filter__parse(struct list_head *expr_head, const char *str);
diff --git a/tools/perf/util/bpf-filter.l b/tools/perf/util/bpf-filter.l
index d4ff0f1345cd..2a7c839f3fae 100644
--- a/tools/perf/util/bpf-filter.l
+++ b/tools/perf/util/bpf-filter.l
@@ -9,16 +9,16 @@
#include "bpf-filter.h"
#include "bpf-filter-bison.h"
-static int sample(unsigned long sample_flag)
+static int sample(enum perf_bpf_filter_term term)
{
- perf_bpf_filter_lval.sample.type = sample_flag;
+ perf_bpf_filter_lval.sample.term = term;
perf_bpf_filter_lval.sample.part = 0;
return BFT_SAMPLE;
}
-static int sample_part(unsigned long sample_flag, int part)
+static int sample_part(enum perf_bpf_filter_term term, int part)
{
- perf_bpf_filter_lval.sample.type = sample_flag;
+ perf_bpf_filter_lval.sample.term = term;
perf_bpf_filter_lval.sample.part = part;
return BFT_SAMPLE;
}
@@ -67,34 +67,36 @@ ident [_a-zA-Z][_a-zA-Z0-9]+
{num_hex} { return value(16); }
{space} { }
-ip { return sample(PERF_SAMPLE_IP); }
-id { return sample(PERF_SAMPLE_ID); }
-tid { return sample(PERF_SAMPLE_TID); }
-pid { return sample_part(PERF_SAMPLE_TID, 1); }
-cpu { return sample(PERF_SAMPLE_CPU); }
-time { return sample(PERF_SAMPLE_TIME); }
-addr { return sample(PERF_SAMPLE_ADDR); }
-period { return sample(PERF_SAMPLE_PERIOD); }
-txn { return sample(PERF_SAMPLE_TRANSACTION); }
-weight { return sample(PERF_SAMPLE_WEIGHT); }
-weight1 { return sample_part(PERF_SAMPLE_WEIGHT_STRUCT, 1); }
-weight2 { return sample_part(PERF_SAMPLE_WEIGHT_STRUCT, 2); }
-weight3 { return sample_part(PERF_SAMPLE_WEIGHT_STRUCT, 3); }
-ins_lat { return sample_part(PERF_SAMPLE_WEIGHT_STRUCT, 2); } /* alias for weight2 */
-p_stage_cyc { return sample_part(PERF_SAMPLE_WEIGHT_STRUCT, 3); } /* alias for weight3 */
-retire_lat { return sample_part(PERF_SAMPLE_WEIGHT_STRUCT, 3); } /* alias for weight3 */
-phys_addr { return sample(PERF_SAMPLE_PHYS_ADDR); }
-code_pgsz { return sample(PERF_SAMPLE_CODE_PAGE_SIZE); }
-data_pgsz { return sample(PERF_SAMPLE_DATA_PAGE_SIZE); }
-mem_op { return sample_part(PERF_SAMPLE_DATA_SRC, 1); }
-mem_lvlnum { return sample_part(PERF_SAMPLE_DATA_SRC, 2); }
-mem_lvl { return sample_part(PERF_SAMPLE_DATA_SRC, 2); } /* alias for mem_lvlnum */
-mem_snoop { return sample_part(PERF_SAMPLE_DATA_SRC, 3); } /* include snoopx */
-mem_remote { return sample_part(PERF_SAMPLE_DATA_SRC, 4); }
-mem_lock { return sample_part(PERF_SAMPLE_DATA_SRC, 5); }
-mem_dtlb { return sample_part(PERF_SAMPLE_DATA_SRC, 6); }
-mem_blk { return sample_part(PERF_SAMPLE_DATA_SRC, 7); }
-mem_hops { return sample_part(PERF_SAMPLE_DATA_SRC, 8); }
+ip { return sample(PBF_TERM_IP); }
+id { return sample(PBF_TERM_ID); }
+tid { return sample(PBF_TERM_TID); }
+pid { return sample_part(PBF_TERM_TID, 1); }
+cpu { return sample(PBF_TERM_CPU); }
+time { return sample(PBF_TERM_TIME); }
+addr { return sample(PBF_TERM_ADDR); }
+period { return sample(PBF_TERM_PERIOD); }
+txn { return sample(PBF_TERM_TRANSACTION); }
+weight { return sample(PBF_TERM_WEIGHT); }
+weight1 { return sample_part(PBF_TERM_WEIGHT_STRUCT, 1); }
+weight2 { return sample_part(PBF_TERM_WEIGHT_STRUCT, 2); }
+weight3 { return sample_part(PBF_TERM_WEIGHT_STRUCT, 3); }
+ins_lat { return sample_part(PBF_TERM_WEIGHT_STRUCT, 2); } /* alias for weight2 */
+p_stage_cyc { return sample_part(PBF_TERM_WEIGHT_STRUCT, 3); } /* alias for weight3 */
+retire_lat { return sample_part(PBF_TERM_WEIGHT_STRUCT, 3); } /* alias for weight3 */
+phys_addr { return sample(PBF_TERM_PHYS_ADDR); }
+code_pgsz { return sample(PBF_TERM_CODE_PAGE_SIZE); }
+data_pgsz { return sample(PBF_TERM_DATA_PAGE_SIZE); }
+mem_op { return sample_part(PBF_TERM_DATA_SRC, 1); }
+mem_lvlnum { return sample_part(PBF_TERM_DATA_SRC, 2); }
+mem_lvl { return sample_part(PBF_TERM_DATA_SRC, 2); } /* alias for mem_lvlnum */
+mem_snoop { return sample_part(PBF_TERM_DATA_SRC, 3); } /* include snoopx */
+mem_remote { return sample_part(PBF_TERM_DATA_SRC, 4); }
+mem_lock { return sample_part(PBF_TERM_DATA_SRC, 5); }
+mem_dtlb { return sample_part(PBF_TERM_DATA_SRC, 6); }
+mem_blk { return sample_part(PBF_TERM_DATA_SRC, 7); }
+mem_hops { return sample_part(PBF_TERM_DATA_SRC, 8); }
+uid { return sample(PBF_TERM_UID); }
+gid { return sample(PBF_TERM_GID); }
"==" { return operator(PBF_OP_EQ); }
"!=" { return operator(PBF_OP_NEQ); }
diff --git a/tools/perf/util/bpf-filter.y b/tools/perf/util/bpf-filter.y
index 0e4d6de3c2ad..0c56fccb8874 100644
--- a/tools/perf/util/bpf-filter.y
+++ b/tools/perf/util/bpf-filter.y
@@ -27,7 +27,7 @@ static void perf_bpf_filter_error(struct list_head *expr __maybe_unused,
{
unsigned long num;
struct {
- unsigned long type;
+ enum perf_bpf_filter_term term;
int part;
} sample;
enum perf_bpf_filter_op op;
@@ -62,7 +62,8 @@ filter_term BFT_LOGICAL_OR filter_expr
if ($1->op == PBF_OP_GROUP_BEGIN) {
expr = $1;
} else {
- expr = perf_bpf_filter_expr__new(0, 0, PBF_OP_GROUP_BEGIN, 1);
+ expr = perf_bpf_filter_expr__new(PBF_TERM_NONE, /*part=*/0,
+ PBF_OP_GROUP_BEGIN, /*val=*/1);
list_add_tail(&$1->list, &expr->groups);
}
expr->val++;
@@ -78,7 +79,7 @@ filter_expr
filter_expr:
BFT_SAMPLE BFT_OP BFT_NUM
{
- $$ = perf_bpf_filter_expr__new($1.type, $1.part, $2, $3);
+ $$ = perf_bpf_filter_expr__new($1.term, $1.part, $2, $3);
}
%%
diff --git a/tools/perf/util/bpf_skel/sample-filter.h b/tools/perf/util/bpf_skel/sample-filter.h
index 2e96e1ab084a..350efa121026 100644
--- a/tools/perf/util/bpf_skel/sample-filter.h
+++ b/tools/perf/util/bpf_skel/sample-filter.h
@@ -16,12 +16,48 @@ enum perf_bpf_filter_op {
PBF_OP_GROUP_END,
};
+enum perf_bpf_filter_term {
+ /* No term is in use. */
+ PBF_TERM_NONE = 0,
+ /* Terms that correspond to PERF_SAMPLE_xx values. */
+ PBF_TERM_SAMPLE_START = PBF_TERM_NONE + 1,
+ PBF_TERM_IP = PBF_TERM_SAMPLE_START + 0, /* SAMPLE_IP = 1U << 0 */
+ PBF_TERM_TID = PBF_TERM_SAMPLE_START + 1, /* SAMPLE_TID = 1U << 1 */
+ PBF_TERM_TIME = PBF_TERM_SAMPLE_START + 2, /* SAMPLE_TIME = 1U << 2 */
+ PBF_TERM_ADDR = PBF_TERM_SAMPLE_START + 3, /* SAMPLE_ADDR = 1U << 3 */
+ __PBF_UNUSED_TERM4 = PBF_TERM_SAMPLE_START + 4, /* SAMPLE_READ = 1U << 4 */
+ __PBF_UNUSED_TERM5 = PBF_TERM_SAMPLE_START + 5, /* SAMPLE_CALLCHAIN = 1U << 5 */
+ PBF_TERM_ID = PBF_TERM_SAMPLE_START + 6, /* SAMPLE_ID = 1U << 6 */
+ PBF_TERM_CPU = PBF_TERM_SAMPLE_START + 7, /* SAMPLE_CPU = 1U << 7 */
+ PBF_TERM_PERIOD = PBF_TERM_SAMPLE_START + 8, /* SAMPLE_PERIOD = 1U << 8 */
+ __PBF_UNUSED_TERM9 = PBF_TERM_SAMPLE_START + 9, /* SAMPLE_STREAM_ID = 1U << 9 */
+ __PBF_UNUSED_TERM10 = PBF_TERM_SAMPLE_START + 10, /* SAMPLE_RAW = 1U << 10 */
+ __PBF_UNUSED_TERM11 = PBF_TERM_SAMPLE_START + 11, /* SAMPLE_BRANCH_STACK = 1U << 11 */
+ __PBF_UNUSED_TERM12 = PBF_TERM_SAMPLE_START + 12, /* SAMPLE_REGS_USER = 1U << 12 */
+ __PBF_UNUSED_TERM13 = PBF_TERM_SAMPLE_START + 13, /* SAMPLE_STACK_USER = 1U << 13 */
+ PBF_TERM_WEIGHT = PBF_TERM_SAMPLE_START + 14, /* SAMPLE_WEIGHT = 1U << 14 */
+ PBF_TERM_DATA_SRC = PBF_TERM_SAMPLE_START + 15, /* SAMPLE_DATA_SRC = 1U << 15 */
+ __PBF_UNUSED_TERM16 = PBF_TERM_SAMPLE_START + 16, /* SAMPLE_IDENTIFIER = 1U << 16 */
+ PBF_TERM_TRANSACTION = PBF_TERM_SAMPLE_START + 17, /* SAMPLE_TRANSACTION = 1U << 17 */
+ __PBF_UNUSED_TERM18 = PBF_TERM_SAMPLE_START + 18, /* SAMPLE_REGS_INTR = 1U << 18 */
+ PBF_TERM_PHYS_ADDR = PBF_TERM_SAMPLE_START + 19, /* SAMPLE_PHYS_ADDR = 1U << 19 */
+ __PBF_UNUSED_TERM20 = PBF_TERM_SAMPLE_START + 20, /* SAMPLE_AUX = 1U << 20 */
+ __PBF_UNUSED_TERM21 = PBF_TERM_SAMPLE_START + 21, /* SAMPLE_CGROUP = 1U << 21 */
+ PBF_TERM_DATA_PAGE_SIZE = PBF_TERM_SAMPLE_START + 22, /* SAMPLE_DATA_PAGE_SIZE = 1U << 22 */
+ PBF_TERM_CODE_PAGE_SIZE = PBF_TERM_SAMPLE_START + 23, /* SAMPLE_CODE_PAGE_SIZE = 1U << 23 */
+ PBF_TERM_WEIGHT_STRUCT = PBF_TERM_SAMPLE_START + 24, /* SAMPLE_WEIGHT_STRUCT = 1U << 24 */
+ PBF_TERM_SAMPLE_END = PBF_TERM_WEIGHT_STRUCT,
+ /* Terms computed from BPF helpers. */
+ PBF_TERM_UID,
+ PBF_TERM_GID,
+};
+
/* BPF map entry for filtering */
struct perf_bpf_filter_entry {
enum perf_bpf_filter_op op;
__u32 part; /* sub-sample type info when it has multiple values */
- __u64 flags; /* perf sample type flags */
+ enum perf_bpf_filter_term term;
__u64 value;
};
-#endif /* PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H */
\ No newline at end of file
+#endif /* PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H */
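
The enum is laid out so that each PBF_TERM_* value in the sample range sits at the bit position of its PERF_SAMPLE_* counterpart, which is what lets later code recover the sample_type bit with a single shift. A minimal standalone sketch of that correspondence (toy enum values, not the real definitions):

	#include <stdio.h>

	/* Tiny subset of the enum above, only to show the intended mapping. */
	enum { TERM_SAMPLE_START = 1, TERM_IP = 1, TERM_TID = 2, TERM_ADDR = 4 };

	int main(void)
	{
		/* PERF_SAMPLE_IP is 1U << 0, PERF_SAMPLE_TID is 1U << 1, so a
		 * term converts back to its sample_type bit with one shift. */
		printf("ip %#lx tid %#lx addr %#lx\n",
		       1UL << (TERM_IP - TERM_SAMPLE_START),    /* 0x1 */
		       1UL << (TERM_TID - TERM_SAMPLE_START),   /* 0x2 */
		       1UL << (TERM_ADDR - TERM_SAMPLE_START)); /* 0x8 */
		return 0;
	}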
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
index fb94f5280626..f59985101973 100644
--- a/tools/perf/util/bpf_skel/sample_filter.bpf.c
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -48,31 +48,54 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
{
struct perf_sample_data___new *data = (void *)kctx->data;
- if (!bpf_core_field_exists(data->sample_flags) ||
- (data->sample_flags & entry->flags) == 0)
+ if (!bpf_core_field_exists(data->sample_flags))
return 0;
- switch (entry->flags) {
- case PERF_SAMPLE_IP:
+#define BUILD_CHECK_SAMPLE(x) \
+ _Static_assert((1 << (PBF_TERM_##x - PBF_TERM_SAMPLE_START)) == PERF_SAMPLE_##x, \
+ "Mismatched PBF term to sample bit " #x)
+ BUILD_CHECK_SAMPLE(IP);
+ BUILD_CHECK_SAMPLE(TID);
+ BUILD_CHECK_SAMPLE(TIME);
+ BUILD_CHECK_SAMPLE(ADDR);
+ BUILD_CHECK_SAMPLE(ID);
+ BUILD_CHECK_SAMPLE(CPU);
+ BUILD_CHECK_SAMPLE(PERIOD);
+ BUILD_CHECK_SAMPLE(WEIGHT);
+ BUILD_CHECK_SAMPLE(DATA_SRC);
+ BUILD_CHECK_SAMPLE(TRANSACTION);
+ BUILD_CHECK_SAMPLE(PHYS_ADDR);
+ BUILD_CHECK_SAMPLE(DATA_PAGE_SIZE);
+ BUILD_CHECK_SAMPLE(CODE_PAGE_SIZE);
+ BUILD_CHECK_SAMPLE(WEIGHT_STRUCT);
+#undef BUILD_CHECK_SAMPLE
+
+ /* For sample terms check the sample bit is set. */
+ if (entry->term >= PBF_TERM_SAMPLE_START && entry->term <= PBF_TERM_SAMPLE_END &&
+ (data->sample_flags & (1 << (entry->term - PBF_TERM_SAMPLE_START))) == 0)
+ return 0;
+
+ switch (entry->term) {
+ case PBF_TERM_IP:
return kctx->data->ip;
- case PERF_SAMPLE_ID:
+ case PBF_TERM_ID:
return kctx->data->id;
- case PERF_SAMPLE_TID:
+ case PBF_TERM_TID:
if (entry->part)
return kctx->data->tid_entry.pid;
else
return kctx->data->tid_entry.tid;
- case PERF_SAMPLE_CPU:
+ case PBF_TERM_CPU:
return kctx->data->cpu_entry.cpu;
- case PERF_SAMPLE_TIME:
+ case PBF_TERM_TIME:
return kctx->data->time;
- case PERF_SAMPLE_ADDR:
+ case PBF_TERM_ADDR:
return kctx->data->addr;
- case PERF_SAMPLE_PERIOD:
+ case PBF_TERM_PERIOD:
return kctx->data->period;
- case PERF_SAMPLE_TRANSACTION:
+ case PBF_TERM_TRANSACTION:
return kctx->data->txn;
- case PERF_SAMPLE_WEIGHT_STRUCT:
+ case PBF_TERM_WEIGHT_STRUCT:
if (entry->part == 1)
return kctx->data->weight.var1_dw;
if (entry->part == 2)
@@ -80,15 +103,15 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
if (entry->part == 3)
return kctx->data->weight.var3_w;
/* fall through */
- case PERF_SAMPLE_WEIGHT:
+ case PBF_TERM_WEIGHT:
return kctx->data->weight.full;
- case PERF_SAMPLE_PHYS_ADDR:
+ case PBF_TERM_PHYS_ADDR:
return kctx->data->phys_addr;
- case PERF_SAMPLE_CODE_PAGE_SIZE:
+ case PBF_TERM_CODE_PAGE_SIZE:
return kctx->data->code_page_size;
- case PERF_SAMPLE_DATA_PAGE_SIZE:
+ case PBF_TERM_DATA_PAGE_SIZE:
return kctx->data->data_page_size;
- case PERF_SAMPLE_DATA_SRC:
+ case PBF_TERM_DATA_SRC:
if (entry->part == 1)
return kctx->data->data_src.mem_op;
if (entry->part == 2)
@@ -117,6 +140,22 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
}
/* return the whole word */
return kctx->data->data_src.val;
+ case PBF_TERM_UID:
+ return bpf_get_current_uid_gid() & 0xFFFFFFFF;
+ case PBF_TERM_GID:
+ return bpf_get_current_uid_gid() >> 32;
+ case PBF_TERM_NONE:
+ case __PBF_UNUSED_TERM4:
+ case __PBF_UNUSED_TERM5:
+ case __PBF_UNUSED_TERM9:
+ case __PBF_UNUSED_TERM10:
+ case __PBF_UNUSED_TERM11:
+ case __PBF_UNUSED_TERM12:
+ case __PBF_UNUSED_TERM13:
+ case __PBF_UNUSED_TERM16:
+ case __PBF_UNUSED_TERM18:
+ case __PBF_UNUSED_TERM20:
+ case __PBF_UNUSED_TERM21:
default:
break;
}
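
For the new uid and gid terms the value comes from bpf_get_current_uid_gid(), which packs the gid in the upper 32 bits and the uid in the lower 32 bits; the filter splits the word as sketched below (ordinary userspace C with a made-up value, only to show the unpacking):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t uid_gid = ((uint64_t)1000 << 32) | 1001;	/* gid=1000, uid=1001 */

		printf("uid %u gid %u\n",
		       (uint32_t)(uid_gid & 0xFFFFFFFF),	/* 1001 */
		       (uint32_t)(uid_gid >> 32));		/* 1000 */
		return 0;
	}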
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 233f2b6edf52..49b79cf0c5cc 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -86,14 +86,6 @@ static struct comm_str *comm_str__new(const char *str)
return result;
}
-static int comm_str__cmp(const void *_lhs, const void *_rhs)
-{
- const struct comm_str *lhs = *(const struct comm_str * const *)_lhs;
- const struct comm_str *rhs = *(const struct comm_str * const *)_rhs;
-
- return strcmp(comm_str__str(lhs), comm_str__str(rhs));
-}
-
static int comm_str__search(const void *_key, const void *_member)
{
const char *key = _key;
@@ -169,9 +161,24 @@ static struct comm_str *comm_strs__findnew(const char *str)
}
result = comm_str__new(str);
if (result) {
- comm_strs->strs[comm_strs->num_strs++] = result;
- qsort(comm_strs->strs, comm_strs->num_strs, sizeof(struct comm_str *),
- comm_str__cmp);
+ int low = 0, high = comm_strs->num_strs - 1;
+ int insert = comm_strs->num_strs; /* Default to inserting at the end. */
+
+ while (low <= high) {
+ int mid = low + (high - low) / 2;
+ int cmp = strcmp(comm_str__str(comm_strs->strs[mid]), str);
+
+ if (cmp < 0) {
+ low = mid + 1;
+ } else {
+ high = mid - 1;
+ insert = mid;
+ }
+ }
+ memmove(&comm_strs->strs[insert + 1], &comm_strs->strs[insert],
+ (comm_strs->num_strs - insert) * sizeof(struct comm_str *));
+ comm_strs->num_strs++;
+ comm_strs->strs[insert] = result;
}
}
up_write(&comm_strs->lock);
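
Rather than appending and re-running qsort() for every new comm string, the array is now kept sorted by locating the insertion point with a binary search and shifting the tail with memmove(). The same lower-bound-insert pattern in isolation (generic integer version, names are illustrative):

	#include <string.h>

	/* Insert 'val' into the sorted array 'arr' of length 'n' (capacity must
	 * exceed n) and return the new length, keeping the array sorted. */
	static int sorted_insert(int *arr, int n, int val)
	{
		int low = 0, high = n - 1, insert = n;

		while (low <= high) {
			int mid = low + (high - low) / 2;

			if (arr[mid] < val) {
				low = mid + 1;
			} else {
				high = mid - 1;
				insert = mid;
			}
		}
		memmove(&arr[insert + 1], &arr[insert], (n - insert) * sizeof(arr[0]));
		arr[insert] = val;
		return n + 1;
	}

The dsos.c hunk further below applies the same pattern to the sorted dso arrays.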
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
index 216cb17a3322..056d665f7f88 100644
--- a/tools/perf/util/cs-etm-decoder/Build
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -1 +1 @@
-perf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
+perf-util-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 32818bd7cd17..5e9fbcfad7d4 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -1013,7 +1013,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
if (!dso)
goto out;
- if (dso->data.status == DSO_DATA_STATUS_ERROR &&
+ if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR &&
dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE))
goto out;
@@ -1027,11 +1027,11 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
if (len <= 0) {
ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
" Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
- if (!dso->auxtrace_warned) {
+ if (!dso__auxtrace_warned(dso)) {
pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
- address,
- dso->long_name ? dso->long_name : "Unknown");
- dso->auxtrace_warned = true;
+ address,
+ dso__long_name(dso) ? dso__long_name(dso) : "Unknown");
+ dso__set_auxtrace_warned(dso);
}
goto out;
}
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
index 72aec8f61b94..e10558b79504 100644
--- a/tools/perf/util/disasm.c
+++ b/tools/perf/util/disasm.c
@@ -1199,7 +1199,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
int ret;
FILE *s;
- if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
+ if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO)
return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
@@ -1226,14 +1226,14 @@ static int symbol__disassemble_bpf(struct symbol *sym,
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
- info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
- dso->bpf_prog.id);
+ info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
+ dso__bpf_prog(dso)->id);
if (!info_node) {
ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
goto out;
}
info_linear = info_node->info_linear;
- sub_id = dso->bpf_prog.sub_id;
+ sub_id = dso__bpf_prog(dso)->sub_id;
info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
info.buffer_length = info_linear->info.jited_prog_len;
@@ -1244,7 +1244,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
if (info_linear->info.btf_id) {
struct btf_node *node;
- node = perf_env__find_btf(dso->bpf_prog.env,
+ node = perf_env__find_btf(dso__bpf_prog(dso)->env,
info_linear->info.btf_id);
if (node)
btf = btf__new((__u8 *)(node->data),
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index dde706b71da7..2340c4f6d0c2 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1652,3 +1652,15 @@ int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
return 0;
}
+
+bool perf_pid_map_tid(const char *dso_name, int *tid)
+{
+ return sscanf(dso_name, "/tmp/perf-%d.map", tid) == 1;
+}
+
+bool is_perf_pid_map_name(const char *dso_name)
+{
+ int tid;
+
+ return perf_pid_map_tid(dso_name, &tid);
+}
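
perf_pid_map_tid() centralises the "/tmp/perf-%d.map" check that dsos.c previously open-coded, and is_perf_pid_map_name() is the boolean-only wrapper. A self-contained sketch of the same parse (helper name invented for the example):

	#include <stdbool.h>
	#include <stdio.h>

	/* Same parse as perf_pid_map_tid(): true when the name matches the JIT
	 * map pattern, with the thread id stored through *tid. */
	static bool parse_perf_map_name(const char *name, int *tid)
	{
		return sscanf(name, "/tmp/perf-%d.map", tid) == 1;
	}

	int main(void)
	{
		int tid = -1;

		printf("%d %d\n", parse_perf_map_name("/tmp/perf-1234.map", &tid), tid);
		/* prints: 1 1234 */
		return 0;
	}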
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index df2c98402af3..878c1f441868 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -280,6 +280,16 @@ static inline void dso__set_annotate_warned(struct dso *dso)
RC_CHK_ACCESS(dso)->annotate_warned = 1;
}
+static inline bool dso__auxtrace_warned(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->auxtrace_warned;
+}
+
+static inline void dso__set_auxtrace_warned(struct dso *dso)
+{
+ RC_CHK_ACCESS(dso)->auxtrace_warned = 1;
+}
+
static inline struct auxtrace_cache *dso__auxtrace_cache(struct dso *dso)
{
return RC_CHK_ACCESS(dso)->auxtrace_cache;
@@ -809,4 +819,8 @@ void reset_fd_limit(void);
u64 dso__find_global_type(struct dso *dso, u64 addr);
u64 dso__findnew_global_type(struct dso *dso, u64 addr, u64 offset);
+/* Check if dso name is of format "/tmp/perf-%d.map" */
+bool perf_pid_map_tid(const char *dso_name, int *tid);
+bool is_perf_pid_map_name(const char *dso_name);
+
#endif /* __PERF_DSO */
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index ab3d0c01dd63..d4acdb37f046 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -164,6 +164,9 @@ static struct dso *__dsos__find_by_longname_id(struct dsos *dsos,
};
struct dso **res;
+ if (dsos->dsos == NULL)
+ return NULL;
+
if (!dsos->sorted) {
if (!write_locked) {
struct dso *dso;
@@ -203,11 +206,27 @@ int __dsos__add(struct dsos *dsos, struct dso *dso)
dsos->dsos = temp;
dsos->allocated = to_allocate;
}
- dsos->dsos[dsos->cnt++] = dso__get(dso);
- if (dsos->cnt >= 2 && dsos->sorted) {
- dsos->sorted = dsos__cmp_long_name_id_short_name(&dsos->dsos[dsos->cnt - 2],
- &dsos->dsos[dsos->cnt - 1])
- <= 0;
+ if (!dsos->sorted) {
+ dsos->dsos[dsos->cnt++] = dso__get(dso);
+ } else {
+ int low = 0, high = dsos->cnt - 1;
+ int insert = dsos->cnt; /* Default to inserting at the end. */
+
+ while (low <= high) {
+ int mid = low + (high - low) / 2;
+ int cmp = dsos__cmp_long_name_id_short_name(&dsos->dsos[mid], &dso);
+
+ if (cmp < 0) {
+ low = mid + 1;
+ } else {
+ high = mid - 1;
+ insert = mid;
+ }
+ }
+ memmove(&dsos->dsos[insert + 1], &dsos->dsos[insert],
+ (dsos->cnt - insert) * sizeof(struct dso *));
+ dsos->cnt++;
+ dsos->dsos[insert] = dso__get(dso);
}
dso__set_dsos(dso, dsos);
return 0;
@@ -275,7 +294,7 @@ static void dso__set_basename(struct dso *dso)
char *base, *lname;
int tid;
- if (sscanf(dso__long_name(dso), "/tmp/perf-%d.map", &tid) == 1) {
+ if (perf_pid_map_tid(dso__long_name(dso), &tid)) {
if (asprintf(&base, "[JIT] tid %d", tid) < 0)
return;
} else {
diff --git a/tools/perf/util/events_stats.h b/tools/perf/util/events_stats.h
index 8fecc9fbaecc..f43e5b1a366a 100644
--- a/tools/perf/util/events_stats.h
+++ b/tools/perf/util/events_stats.h
@@ -52,7 +52,6 @@ struct hists_stats {
void events_stats__inc(struct events_stats *stats, u32 type);
-size_t events_stats__fprintf(struct events_stats *stats, FILE *fp,
- bool skip_empty);
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
#endif /* __PERF_EVENTS_STATS_ */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 4f818ab6b662..bc603193c477 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -10,6 +10,7 @@
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
+#include <api/io.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <linux/hw_breakpoint.h>
@@ -30,6 +31,7 @@
#include "counts.h"
#include "event.h"
#include "evsel.h"
+#include "time-utils.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
@@ -53,6 +55,7 @@
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include "util/bpf-filter.h"
+#include "util/hist.h"
#include <internal/xyarray.h>
#include <internal/lib.h>
#include <internal/threadmap.h>
@@ -830,16 +833,22 @@ const char *evsel__group_name(struct evsel *evsel)
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
int ret = 0;
+ bool first = true;
struct evsel *pos;
const char *group_name = evsel__group_name(evsel);
if (!evsel->forced_leader)
ret = scnprintf(buf, size, "%s { ", group_name);
- ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel));
+ for_each_group_evsel(pos, evsel) {
+ if (symbol_conf.skip_empty &&
+ evsel__hists(pos)->stats.nr_samples == 0)
+ continue;
- for_each_group_member(pos, evsel)
- ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos));
+ ret += scnprintf(buf + ret, size - ret, "%s%s",
+ first ? "" : ", ", evsel__name(pos));
+ first = false;
+ }
if (!evsel->forced_leader)
ret += scnprintf(buf + ret, size - ret, " }");
@@ -1493,6 +1502,9 @@ void evsel__exit(struct evsel *evsel)
evsel->per_pkg_mask = NULL;
zfree(&evsel->metric_events);
perf_evsel__object.fini(evsel);
+ if (evsel->tool_event == PERF_TOOL_SYSTEM_TIME ||
+ evsel->tool_event == PERF_TOOL_USER_TIME)
+ xyarray__delete(evsel->start_times);
}
void evsel__delete(struct evsel *evsel)
@@ -1606,11 +1618,173 @@ static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
return evsel__process_group_data(leader, cpu_map_idx, thread, data);
}
+static bool read_until_char(struct io *io, char e)
+{
+ int c;
+
+ do {
+ c = io__get_char(io);
+ if (c == -1)
+ return false;
+ } while (c != e);
+ return true;
+}
+
+static int read_stat_field(int fd, struct perf_cpu cpu, int field, __u64 *val)
+{
+ char buf[256];
+ struct io io;
+ int i;
+
+ io__init(&io, fd, buf, sizeof(buf));
+
+ /* Skip lines to relevant CPU. */
+ for (i = -1; i < cpu.cpu; i++) {
+ if (!read_until_char(&io, '\n'))
+ return -EINVAL;
+ }
+ /* Skip to "cpu". */
+ if (io__get_char(&io) != 'c') return -EINVAL;
+ if (io__get_char(&io) != 'p') return -EINVAL;
+ if (io__get_char(&io) != 'u') return -EINVAL;
+
+ /* Skip N of cpuN. */
+ if (!read_until_char(&io, ' '))
+ return -EINVAL;
+
+ i = 1;
+ while (true) {
+ if (io__get_dec(&io, val) != ' ')
+ break;
+ if (field == i)
+ return 0;
+ i++;
+ }
+ return -EINVAL;
+}
+
+static int read_pid_stat_field(int fd, int field, __u64 *val)
+{
+ char buf[256];
+ struct io io;
+ int c, i;
+
+ io__init(&io, fd, buf, sizeof(buf));
+ if (io__get_dec(&io, val) != ' ')
+ return -EINVAL;
+ if (field == 1)
+ return 0;
+
+ /* Skip comm. */
+ if (io__get_char(&io) != '(' || !read_until_char(&io, ')'))
+ return -EINVAL;
+ if (field == 2)
+ return -EINVAL; /* String can't be returned. */
+
+ /* Skip state */
+ if (io__get_char(&io) != ' ' || io__get_char(&io) == -1)
+ return -EINVAL;
+ if (field == 3)
+ return -EINVAL; /* String can't be returned. */
+
+ /* Loop over numeric fields*/
+ if (io__get_char(&io) != ' ')
+ return -EINVAL;
+
+ i = 4;
+ while (true) {
+ c = io__get_dec(&io, val);
+ if (c == -1)
+ return -EINVAL;
+ if (c == -2) {
+ /* Assume a -ve was read */
+ c = io__get_dec(&io, val);
+ *val *= -1;
+ }
+ if (c != ' ')
+ return -EINVAL;
+ if (field == i)
+ return 0;
+ i++;
+ }
+ return -EINVAL;
+}
+
+static int evsel__read_tool(struct evsel *evsel, int cpu_map_idx, int thread)
+{
+ __u64 *start_time, cur_time, delta_start;
+ int fd, err = 0;
+ struct perf_counts_values *count;
+ bool adjust = false;
+
+ count = perf_counts(evsel->counts, cpu_map_idx, thread);
+
+ switch (evsel->tool_event) {
+ case PERF_TOOL_DURATION_TIME:
+ /*
+ * Pretend duration_time is only on the first CPU and thread, or
+ * else aggregation will scale duration_time by the number of
+ * CPUs/threads.
+ */
+ start_time = &evsel->start_time;
+ if (cpu_map_idx == 0 && thread == 0)
+ cur_time = rdclock();
+ else
+ cur_time = *start_time;
+ break;
+ case PERF_TOOL_USER_TIME:
+ case PERF_TOOL_SYSTEM_TIME: {
+ bool system = evsel->tool_event == PERF_TOOL_SYSTEM_TIME;
+
+ start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
+ fd = FD(evsel, cpu_map_idx, thread);
+ lseek(fd, SEEK_SET, 0);
+ if (evsel->pid_stat) {
+ /* The event exists solely on 1 CPU. */
+ if (cpu_map_idx == 0)
+ err = read_pid_stat_field(fd, system ? 15 : 14, &cur_time);
+ else
+ cur_time = 0;
+ } else {
+ /* The event is for all threads. */
+ if (thread == 0) {
+ struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus,
+ cpu_map_idx);
+
+ err = read_stat_field(fd, cpu, system ? 3 : 1, &cur_time);
+ } else {
+ cur_time = 0;
+ }
+ }
+ adjust = true;
+ break;
+ }
+ case PERF_TOOL_NONE:
+ case PERF_TOOL_MAX:
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ return err;
+
+ delta_start = cur_time - *start_time;
+ if (adjust) {
+ __u64 ticks_per_sec = sysconf(_SC_CLK_TCK);
+
+ delta_start *= 1000000000 / ticks_per_sec;
+ }
+ count->val = delta_start;
+ count->ena = count->run = delta_start;
+ count->lost = 0;
+ return 0;
+}
+
int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
{
- u64 read_format = evsel->core.attr.read_format;
+ if (evsel__is_tool(evsel))
+ return evsel__read_tool(evsel, cpu_map_idx, thread);
- if (read_format & PERF_FORMAT_GROUP)
+ if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
return evsel__read_group(evsel, cpu_map_idx, thread);
return evsel__read_one(evsel, cpu_map_idx, thread);
@@ -1829,6 +2003,14 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
return -ENOMEM;
+ if ((evsel->tool_event == PERF_TOOL_SYSTEM_TIME ||
+ evsel->tool_event == PERF_TOOL_USER_TIME) &&
+ !evsel->start_times) {
+ evsel->start_times = xyarray__new(perf_cpu_map__nr(cpus), nthreads, sizeof(__u64));
+ if (!evsel->start_times)
+ return -ENOMEM;
+ }
+
evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
if (evsel->cgrp)
evsel->open_flags |= PERF_FLAG_PID_CGROUP;
@@ -2011,6 +2193,13 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
int pid = -1, err, old_errno;
enum rlimit_action set_rlimit = NO_CHANGE;
+ if (evsel->tool_event == PERF_TOOL_DURATION_TIME) {
+ if (evsel->core.attr.sample_period) /* no sampling */
+ return -EINVAL;
+ evsel->start_time = rdclock();
+ return 0;
+ }
+
err = __evsel__prepare_open(evsel, cpus, threads);
if (err)
return err;
@@ -2043,6 +2232,46 @@ retry_open:
if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);
+ if (evsel->tool_event == PERF_TOOL_USER_TIME ||
+ evsel->tool_event == PERF_TOOL_SYSTEM_TIME) {
+ bool system = evsel->tool_event == PERF_TOOL_SYSTEM_TIME;
+ __u64 *start_time = NULL;
+
+ if (evsel->core.attr.sample_period) {
+ /* no sampling */
+ err = -EINVAL;
+ goto out_close;
+ }
+ if (pid > -1) {
+ char buf[64];
+
+ snprintf(buf, sizeof(buf), "/proc/%d/stat", pid);
+ fd = open(buf, O_RDONLY);
+ evsel->pid_stat = true;
+ } else {
+ fd = open("/proc/stat", O_RDONLY);
+ }
+ FD(evsel, idx, thread) = fd;
+ if (fd < 0) {
+ err = -errno;
+ goto out_close;
+ }
+ start_time = xyarray__entry(evsel->start_times, idx, thread);
+ if (pid > -1) {
+ err = read_pid_stat_field(fd, system ? 15 : 14,
+ start_time);
+ } else {
+ struct perf_cpu cpu;
+
+ cpu = perf_cpu_map__cpu(evsel->core.cpus, idx);
+ err = read_stat_field(fd, cpu, system ? 3 : 1,
+ start_time);
+ }
+ if (err)
+ goto out_close;
+ continue;
+ }
+
group_fd = get_group_fd(evsel, idx, thread);
if (group_fd == -2) {
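
The new tool-event read path takes its counts from /proc/stat (per-CPU user and system time, fields 1 and 3 after the cpuN label) or from /proc/<pid>/stat (fields 14 and 15), then converts the tick delta to nanoseconds using the clock-tick rate. A standalone sketch of that final scaling step only (made-up tick count, not the perf code path):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned long long delta_ticks = 250;		/* made-up sample delta */
		long ticks_per_sec = sysconf(_SC_CLK_TCK);	/* typically 100 */
		unsigned long long delta_ns = delta_ticks * (1000000000ULL / ticks_per_sec);

		printf("%llu ticks -> %llu ns\n", delta_ticks, delta_ns);
		return 0;
	}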
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 375a38e15cd9..80b5f6dd868e 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -170,6 +170,20 @@ struct evsel {
/* for missing_features */
struct perf_pmu *pmu;
+
+ /* For tool events */
+ /* Beginning time subtracted when the counter is read. */
+ union {
+ /* duration_time is a single global time. */
+ __u64 start_time;
+ /*
+ * user_time and system_time read an initial value potentially
+ * per-CPU or per-pid.
+ */
+ struct xyarray *start_times;
+ };
+ /* Is the tool's fd for /proc/pid/stat or /proc/stat. */
+ bool pid_stat;
};
struct perf_missing_features {
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index b8875aac8f87..b2536a59c44e 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -25,10 +25,6 @@
#include <math.h>
#include "pmu.h"
-#ifdef PARSER_DEBUG
-extern int expr_debug;
-#endif
-
struct expr_id_data {
union {
struct {
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index ac17a3cb59dc..c8f6bee1fa61 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -54,11 +54,6 @@ typedef struct {
char name[0]; /* Start of the name+desc data */
} Elf_Note;
-struct options {
- char *output;
- int fd;
-};
-
static char shd_string_table[] = {
0,
'.', 't', 'e', 'x', 't', 0, /* 1 */
diff --git a/tools/perf/util/hisi-ptt-decoder/Build b/tools/perf/util/hisi-ptt-decoder/Build
index db3db8b75033..3298f7b7e308 100644
--- a/tools/perf/util/hisi-ptt-decoder/Build
+++ b/tools/perf/util/hisi-ptt-decoder/Build
@@ -1 +1 @@
-perf-$(CONFIG_AUXTRACE) += hisi-ptt-pkt-decoder.o
+perf-util-$(CONFIG_AUXTRACE) += hisi-ptt-pkt-decoder.o
diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
index 52d0ce302ca0..37ea987017f6 100644
--- a/tools/perf/util/hisi-ptt.c
+++ b/tools/perf/util/hisi-ptt.c
@@ -35,11 +35,6 @@ struct hisi_ptt {
u32 pmu_type;
};
-struct hisi_ptt_queue {
- struct hisi_ptt *ptt;
- struct auxtrace_buffer *buffer;
-};
-
static enum hisi_ptt_pkt_type hisi_ptt_check_packet_type(unsigned char *buf)
{
uint32_t head = *(uint32_t *)buf;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 2e9e193179dd..f028f113c4fd 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -2706,8 +2706,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
}
}
-size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
- bool skip_empty)
+size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
struct evsel *pos;
size_t ret = 0;
@@ -2715,7 +2714,8 @@ size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
- if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
+ if (symbol_conf.skip_empty && !hists->stats.nr_samples &&
+ !hists->stats.nr_lost_samples)
continue;
ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 8fb3bdd29188..5273f5c37050 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -375,8 +375,7 @@ void hists__inc_nr_lost_samples(struct hists *hists, u32 lost);
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
bool ignore_callchains);
-size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
- bool skip_empty);
+size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp);
void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index b41c2e9c6f88..30793d08c6d4 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -1,4 +1,4 @@
-perf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
+perf-util-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
inat_tables_script = $(srctree)/tools/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/tools/arch/x86/lib/x86-opcode-map.txt
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index c5d57027ec23..4407130d91f8 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -92,6 +92,15 @@ static void intel_pt_insn_decoder(struct insn *insn,
op = INTEL_PT_OP_JCC;
branch = INTEL_PT_BR_CONDITIONAL;
break;
+ case 0xa1:
+ if (insn_is_rex2(insn)) { /* jmpabs */
+ intel_pt_insn->op = INTEL_PT_OP_JMP;
+ /* jmpabs causes a TIP packet like an indirect branch */
+ intel_pt_insn->branch = INTEL_PT_BR_INDIRECT;
+ intel_pt_insn->length = insn->length;
+ return;
+ }
+ break;
case 0xc2: /* near ret */
case 0xc3: /* near ret */
case 0xca: /* far ret */
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index 16b39db594f4..432399cbe5dd 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -735,26 +735,79 @@ static unsigned int first_ending_after(struct maps *maps, const struct map *map)
return first;
}
+static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index,
+ struct map *new1, struct map *new2)
+{
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ struct map **maps_by_name = maps__maps_by_name(maps);
+ unsigned int nr_maps = maps__nr_maps(maps);
+ unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
+ unsigned int to_add = new2 ? 2 : 1;
+
+ assert(maps__maps_by_address_sorted(maps));
+ assert(first_after_index == nr_maps ||
+ map__end(new1) <= map__start(maps_by_address[first_after_index]));
+ assert(!new2 || map__end(new1) <= map__start(new2));
+ assert(first_after_index == nr_maps || !new2 ||
+ map__end(new2) <= map__start(maps_by_address[first_after_index]));
+
+ if (nr_maps + to_add > nr_allocate) {
+ nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;
+
+ maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1));
+ if (!maps_by_address)
+ return -ENOMEM;
+
+ maps__set_maps_by_address(maps, maps_by_address);
+ if (maps_by_name) {
+ maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1));
+ if (!maps_by_name) {
+ /*
+ * If by name fails, just disable by name and it will
+ * recompute next time it is required.
+ */
+ __maps__free_maps_by_name(maps);
+ }
+ maps__set_maps_by_name(maps, maps_by_name);
+ }
+ RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
+ }
+ memmove(&maps_by_address[first_after_index+to_add],
+ &maps_by_address[first_after_index],
+ (nr_maps - first_after_index) * sizeof(new1));
+ maps_by_address[first_after_index] = map__get(new1);
+ if (maps_by_name)
+ maps_by_name[nr_maps] = map__get(new1);
+ if (new2) {
+ maps_by_address[first_after_index + 1] = map__get(new2);
+ if (maps_by_name)
+ maps_by_name[nr_maps + 1] = map__get(new2);
+ }
+ RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
+ maps__set_maps_by_name_sorted(maps, false);
+ check_invariants(maps);
+ return 0;
+}
+
/*
* Adds new to maps, if new overlaps existing entries then the existing maps are
* adjusted or removed so that new fits without overlapping any entries.
*/
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
- struct map **maps_by_address;
int err = 0;
FILE *fp = debug_file();
+ unsigned int i;
-sort_again:
if (!maps__maps_by_address_sorted(maps))
__maps__sort_by_address(maps);
- maps_by_address = maps__maps_by_address(maps);
/*
* Iterate through entries where the end of the existing entry is
* greater-than the new map's start.
*/
- for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
+ for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
+ struct map **maps_by_address = maps__maps_by_address(maps);
struct map *pos = maps_by_address[i];
struct map *before = NULL, *after = NULL;
@@ -821,37 +874,55 @@ sort_again:
/* Maps are still ordered, go to next one. */
i++;
if (after) {
- __maps__insert(maps, after);
- map__put(after);
- if (!maps__maps_by_address_sorted(maps)) {
- /*
- * Sorting broken so invariants don't
- * hold, sort and go again.
- */
- goto sort_again;
- }
/*
- * Maps are still ordered, skip after and go to
- * next one (terminate loop).
+ * 'before' and 'after' mean 'new' split the
+ * 'pos' mapping and therefore there are no
+ * later mappings.
*/
- i++;
+ err = __maps__insert_sorted(maps, i, new, after);
+ map__put(after);
+ check_invariants(maps);
+ return err;
}
+ check_invariants(maps);
} else if (after) {
+ /*
+ * 'after' means 'new' split 'pos' and there are no
+ * later mappings.
+ */
map__put(maps_by_address[i]);
- maps_by_address[i] = after;
- /* Maps are ordered, go to next one. */
- i++;
+ maps_by_address[i] = map__get(new);
+ err = __maps__insert_sorted(maps, i + 1, after, NULL);
+ map__put(after);
+ check_invariants(maps);
+ return err;
} else {
+ struct map *next = NULL;
+
+ if (i + 1 < maps__nr_maps(maps))
+ next = maps_by_address[i + 1];
+
+ if (!next || map__start(next) >= map__end(new)) {
+ /*
+ * Replace existing mapping and end knowing
+ * there aren't later overlapping or any
+ * mappings.
+ */
+ map__put(maps_by_address[i]);
+ maps_by_address[i] = map__get(new);
+ check_invariants(maps);
+ return err;
+ }
__maps__remove(maps, pos);
+ check_invariants(maps);
/*
* Maps are ordered but no need to increase `i` as the
* later maps were moved down.
*/
}
- check_invariants(maps);
}
/* Add the map. */
- __maps__insert(maps, new);
+ err = __maps__insert_sorted(maps, i, new, NULL);
out_err:
return err;
}
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 6dda47bb774f..be048bd02f36 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -8,6 +8,7 @@
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
+#include "cpumap.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "mem-info.h"
@@ -86,7 +87,7 @@ static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
return NULL;
e = &pmu->mem_events[i];
- if (!e)
+ if (!e || !e->name)
return NULL;
if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
@@ -242,6 +243,7 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
int i = *argv_nr;
const char *s;
char *copy;
+ struct perf_cpu_map *cpu_map = NULL;
while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
@@ -266,7 +268,19 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
rec_argv[i++] = "-e";
rec_argv[i++] = copy;
+
+ cpu_map = perf_cpu_map__merge(cpu_map, pmu->cpus);
+ }
+ }
+
+ if (cpu_map) {
+ if (!perf_cpu_map__equal(cpu_map, cpu_map__online())) {
+ char buf[200];
+
+ cpu_map__snprint(cpu_map, buf, sizeof(buf));
+ pr_warning("Memory events are enabled on a subset of CPUs: %s\n", buf);
}
+ perf_cpu_map__put(cpu_map);
}
*argv_nr = i;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 6ed0f9c5581d..321586fb5556 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -31,9 +31,6 @@
#define MAX_NAME_LEN 100
-#ifdef PARSER_DEBUG
-extern int parse_events_debug;
-#endif
static int get_config_terms(const struct parse_events_terms *head_config,
struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
@@ -308,11 +305,16 @@ static int add_event_tool(struct list_head *list, int *idx,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
};
+ const char *cpu_list = NULL;
+ if (tool_event == PERF_TOOL_DURATION_TIME) {
+ /* Duration time is gathered globally, pretend it is only on CPU0. */
+ cpu_list = "0";
+ }
evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, /*pmu=*/NULL,
/*config_terms=*/NULL, /*auto_merge_stats=*/false,
- /*cpu_list=*/"0");
+ cpu_list);
if (!evsel)
return -ENOMEM;
evsel->tool_event = tool_event;
diff --git a/tools/perf/util/perf-regs-arch/Build b/tools/perf/util/perf-regs-arch/Build
index d9d596d330a7..be95402aa540 100644
--- a/tools/perf/util/perf-regs-arch/Build
+++ b/tools/perf/util/perf-regs-arch/Build
@@ -1,9 +1,9 @@
-perf-y += perf_regs_aarch64.o
-perf-y += perf_regs_arm.o
-perf-y += perf_regs_csky.o
-perf-y += perf_regs_loongarch.o
-perf-y += perf_regs_mips.o
-perf-y += perf_regs_powerpc.o
-perf-y += perf_regs_riscv.o
-perf-y += perf_regs_s390.o
-perf-y += perf_regs_x86.o
+perf-util-y += perf_regs_aarch64.o
+perf-util-y += perf_regs_arm.o
+perf-util-y += perf_regs_csky.o
+perf-util-y += perf_regs_loongarch.o
+perf-util-y += perf_regs_mips.o
+perf-util-y += perf_regs_powerpc.o
+perf-util-y += perf_regs_riscv.o
+perf-util-y += perf_regs_s390.o
+perf-util-y += perf_regs_x86.o
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 888ce9912275..986166bc7c78 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -848,6 +848,23 @@ __weak const struct pmu_metrics_table *pmu_metrics_table__find(void)
}
/**
+ * Return the length of the PMU name not including the suffix for uncore PMUs.
+ *
+ * We want to deduplicate many similar uncore PMUs by stripping their suffixes,
+ * but there are never going to be too many core PMUs and the suffixes might be
+ * interesting. "arm_cortex_a53" vs "arm_cortex_a57" or "cpum_cf" for example.
+ *
+ * @skip_duplicate_pmus: False in verbose mode so all uncore PMUs are visible
+ */
+static size_t pmu_deduped_name_len(const struct perf_pmu *pmu, const char *name,
+ bool skip_duplicate_pmus)
+{
+ return skip_duplicate_pmus && !pmu->is_core
+ ? pmu_name_len_no_suffix(name)
+ : strlen(name);
+}
+
+/**
* perf_pmu__match_ignoring_suffix - Does the pmu_name match tok ignoring any
trailing suffix? The suffix must be in the form
* tok_{digits}, or tok{digits}.
@@ -856,26 +873,34 @@ __weak const struct pmu_metrics_table *pmu_metrics_table__find(void)
*/
static bool perf_pmu__match_ignoring_suffix(const char *pmu_name, const char *tok)
{
- const char *p;
+ const char *p, *suffix;
+ bool has_hex = false;
if (strncmp(pmu_name, tok, strlen(tok)))
return false;
- p = pmu_name + strlen(tok);
+ suffix = p = pmu_name + strlen(tok);
if (*p == 0)
return true;
- if (*p == '_')
+ if (*p == '_') {
++p;
+ ++suffix;
+ }
/* Ensure we end in a number */
while (1) {
- if (!isdigit(*p))
+ if (!isxdigit(*p))
return false;
+ if (!has_hex)
+ has_hex = !isdigit(*p);
if (*(++p) == 0)
break;
}
+ if (has_hex)
+ return (p - suffix) > 2;
+
return true;
}
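
With pmu_deduped_name_len() and the isxdigit() change above, uncore PMU names are deduplicated by stripping a trailing '_{num}' instance suffix, and that suffix may now be hexadecimal as long as it is longer than two hex digits, so a short name such as S390's "cpum_cf" is not mistaken for "cpum_c" plus a suffix. The following standalone re-implementation of the matching rule is a sketch for checking a few cases (the test names and main() are for illustration only):

#include <assert.h>
#include <ctype.h>
#include <stdbool.h>
#include <string.h>

/* Standalone re-implementation of the suffix rule, for illustration only. */
static bool match_ignoring_suffix(const char *pmu_name, const char *tok)
{
	const char *p, *suffix;
	bool has_hex = false;

	if (strncmp(pmu_name, tok, strlen(tok)))
		return false;
	suffix = p = pmu_name + strlen(tok);
	if (*p == 0)
		return true;
	if (*p == '_') {
		++p;
		++suffix;
	}
	while (1) {
		if (!isxdigit(*p))
			return false;
		if (!has_hex)
			has_hex = !isdigit(*p);
		if (*(++p) == 0)
			break;
	}
	return has_hex ? (p - suffix) > 2 : true;
}

int main(void)
{
	assert(match_ignoring_suffix("uncore_cha_0", "uncore_cha"));   /* decimal suffix */
	assert(match_ignoring_suffix("uncore_cha_1f3", "uncore_cha")); /* 3 hex digits   */
	assert(!match_ignoring_suffix("cpum_cf", "cpum_c"));           /* 1 hex digit    */
	return 0;
}
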
@@ -1765,7 +1790,7 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu)
size_t nr;
pmu_aliases_parse(pmu);
- nr = pmu->sysfs_aliases + pmu->sys_json_aliases;;
+ nr = pmu->sysfs_aliases + pmu->sys_json_aliases;
if (pmu->cpu_aliases_added)
nr += pmu->cpu_json_aliases;
@@ -1788,10 +1813,9 @@ static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
const struct perf_pmu_alias *alias, bool skip_duplicate_pmus)
{
struct parse_events_term *term;
- int pmu_name_len = skip_duplicate_pmus
- ? pmu_name_len_no_suffix(pmu->name, /*num=*/NULL)
- : (int)strlen(pmu->name);
- int used = snprintf(buf, len, "%.*s/%s", pmu_name_len, pmu->name, alias->name);
+ size_t pmu_name_len = pmu_deduped_name_len(pmu, pmu->name,
+ skip_duplicate_pmus);
+ int used = snprintf(buf, len, "%.*s/%s", (int)pmu_name_len, pmu->name, alias->name);
list_for_each_entry(term, &alias->terms.terms, list) {
if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
@@ -1828,13 +1852,11 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
pmu_aliases_parse(pmu);
pmu_add_cpu_aliases(pmu);
list_for_each_entry(event, &pmu->aliases, list) {
- size_t buf_used;
- int pmu_name_len;
+ size_t buf_used, pmu_name_len;
info.pmu_name = event->pmu_name ?: pmu->name;
- pmu_name_len = skip_duplicate_pmus
- ? pmu_name_len_no_suffix(info.pmu_name, /*num=*/NULL)
- : (int)strlen(info.pmu_name);
+ pmu_name_len = pmu_deduped_name_len(pmu, info.pmu_name,
+ skip_duplicate_pmus);
info.alias = NULL;
if (event->desc) {
info.name = event->name;
@@ -1859,7 +1881,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
info.encoding_desc = buf + buf_used;
parse_events_terms__to_strbuf(&event->terms, &sb);
buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
- "%.*s/%s/", pmu_name_len, info.pmu_name, sb.buf) + 1;
+ "%.*s/%s/", (int)pmu_name_len, info.pmu_name, sb.buf) + 1;
info.topic = event->topic;
info.str = sb.buf;
info.deprecated = event->deprecated;
@@ -2143,7 +2165,7 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok)
{
const char *name = pmu->name;
- bool need_fnmatch = strchr(tok, '*') != NULL;
+ bool need_fnmatch = strisglob(tok);
if (!strncmp(tok, "uncore_", 7))
tok += 7;
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index b9b4c5eb5002..3fcabfd8fca1 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -40,31 +40,52 @@ static bool read_sysfs_all_pmus;
static void pmu_read_sysfs(bool core_only);
-int pmu_name_len_no_suffix(const char *str, unsigned long *num)
+size_t pmu_name_len_no_suffix(const char *str)
{
int orig_len, len;
+ bool has_hex_digits = false;
orig_len = len = strlen(str);
- /* Non-uncore PMUs have their full length, for example, i915. */
- if (!strstarts(str, "uncore_"))
- return len;
-
- /*
- * Count trailing digits and '_', if '_{num}' suffix isn't present use
- * the full length.
- */
- while (len > 0 && isdigit(str[len - 1]))
+ /* Count trailing digits. */
+ while (len > 0 && isxdigit(str[len - 1])) {
+ if (!isdigit(str[len - 1]))
+ has_hex_digits = true;
len--;
+ }
if (len > 0 && len != orig_len && str[len - 1] == '_') {
- if (num)
- *num = strtoul(&str[len], NULL, 10);
- return len - 1;
+ /*
+ * There is a '_{num}' suffix. For decimal suffixes any length
+ * will do, for hexadecimal ensure more than 2 hex digits so
+ * that S390's cpum_cf PMU doesn't match.
+ */
+ if (!has_hex_digits || (orig_len - len) > 2)
+ return len - 1;
}
+ /* Use the full length. */
return orig_len;
}
+int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
+{
+ unsigned long lhs_num = 0, rhs_num = 0;
+ size_t lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name);
+ size_t rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name);
+ int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
+ lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);
+
+ if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
+ return ret;
+
+ if (lhs_pmu_name_len + 1 < strlen(lhs_pmu_name))
+ lhs_num = strtoul(&lhs_pmu_name[lhs_pmu_name_len + 1], NULL, 16);
+ if (rhs_pmu_name_len + 1 < strlen(rhs_pmu_name))
+ rhs_num = strtoul(&rhs_pmu_name[rhs_pmu_name_len + 1], NULL, 16);
+
+ return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
+}
+
void perf_pmus__destroy(void)
{
struct perf_pmu *pmu, *tmp;
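
pmu_name_cmp() above orders PMUs by the de-suffixed base name first and, when those are equal, by the numeric value of the suffix parsed with strtoul(..., 16), so instances sort numerically rather than lexically. A tiny standalone check of that suffix comparison (sketch only, not perf code):

#include <assert.h>
#include <stdlib.h>

int main(void)
{
	/* Suffixes of "uncore_imc_2" and "uncore_imc_10", parsed as in pmu_name_cmp(). */
	unsigned long a = strtoul("2", NULL, 16);
	unsigned long b = strtoul("10", NULL, 16);

	/* Numeric order keeps _2 before _10; a lexical strcmp() would put "10" first. */
	assert(a < b);
	return 0;
}
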
@@ -167,20 +188,10 @@ static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
static int pmus_cmp(void *priv __maybe_unused,
const struct list_head *lhs, const struct list_head *rhs)
{
- unsigned long lhs_num = 0, rhs_num = 0;
struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);
- const char *lhs_pmu_name = lhs_pmu->name ?: "";
- const char *rhs_pmu_name = rhs_pmu->name ?: "";
- int lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name, &lhs_num);
- int rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name, &rhs_num);
- int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
- lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);
-
- if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
- return ret;
- return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
+ return pmu_name_cmp(lhs_pmu->name ?: "", rhs_pmu->name ?: "");
}
/* Add all pmus in sysfs to pmu list: */
@@ -300,11 +311,11 @@ static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
pmu_read_sysfs(/*core_only=*/false);
pmu = list_prepare_entry(pmu, &core_pmus, list);
} else
- last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", NULL);
+ last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
if (use_core_pmus) {
list_for_each_entry_continue(pmu, &core_pmus, list) {
- int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);
+ int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
if (last_pmu_name_len == pmu_name_len &&
!strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
@@ -316,7 +327,7 @@ static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
pmu = list_prepare_entry(pmu, &other_pmus, list);
}
list_for_each_entry_continue(pmu, &other_pmus, list) {
- int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL);
+ int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
if (last_pmu_name_len == pmu_name_len &&
!strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
@@ -477,8 +488,8 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
for (int j = 0; j < len; j++) {
/* Skip duplicates */
- if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
- continue;
+ if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
+ goto free;
print_cb->print_event(print_state,
aliases[j].pmu_name,
@@ -491,6 +502,7 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
aliases[j].desc,
aliases[j].long_desc,
aliases[j].encoding_desc);
+free:
zfree(&aliases[j].name);
zfree(&aliases[j].alias);
zfree(&aliases[j].scale_unit);
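
Note the direction change in the duplicate check above: an alias is now compared against the next sorted entry and skipped via the 'free' label, so the strings of skipped duplicates are still released rather than leaked as they were with the old 'continue'. A minimal standalone sketch of the same skip-but-still-free pattern (illustrative only):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Sorted names with one duplicate; only unique entries are "printed". */
	char *names[] = { strdup("a"), strdup("b"), strdup("b"), strdup("c") };
	int len = 4, printed = 0;

	for (int j = 0; j < len; j++) {
		if (j < len - 1 && !strcmp(names[j], names[j + 1]))
			goto free_entry;	/* skip duplicate but still free it */
		printed++;
free_entry:
		free(names[j]);			/* freed whether printed or skipped */
	}
	assert(printed == 3);
	return 0;
}
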
@@ -566,7 +578,7 @@ void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, voi
.long_string = STRBUF_INIT,
.num_formats = 0,
};
- int len = pmu_name_len_no_suffix(pmu->name, /*num=*/NULL);
+ int len = pmu_name_len_no_suffix(pmu->name);
const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";
if (!pmu->is_core)
diff --git a/tools/perf/util/pmus.h b/tools/perf/util/pmus.h
index 9d4ded80b8e9..bdbff02324bb 100644
--- a/tools/perf/util/pmus.h
+++ b/tools/perf/util/pmus.h
@@ -2,10 +2,15 @@
#ifndef __PMUS_H
#define __PMUS_H
+#include <stdbool.h>
+#include <stddef.h>
+
struct perf_pmu;
struct print_callbacks;
-int pmu_name_len_no_suffix(const char *str, unsigned long *num);
+size_t pmu_name_len_no_suffix(const char *str);
+/* Exposed for testing only. */
+int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name);
void perf_pmus__destroy(void);
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
deleted file mode 100644
index 1bec945f4838..000000000000
--- a/tools/perf/util/python-ext-sources
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# List of files needed by perf python extension
-#
-# Each source file must be placed on its own line so that it can be
-# processed by Makefile and util/setup.py accordingly.
-#
-
-util/python.c
-../lib/ctype.c
-util/cap.c
-util/evlist.c
-util/evsel.c
-util/evsel_fprintf.c
-util/perf_event_attr_fprintf.c
-util/cpumap.c
-util/memswap.c
-util/mmap.c
-util/namespaces.c
-../lib/bitmap.c
-../lib/find_bit.c
-../lib/list_sort.c
-../lib/hweight.c
-../lib/string.c
-../lib/vsprintf.c
-util/thread_map.c
-util/util.c
-util/cgroup.c
-util/parse-branch-options.c
-util/rblist.c
-util/counts.c
-util/print_binary.c
-util/strlist.c
-util/trace-event.c
-util/trace-event-parse.c
-../lib/rbtree.c
-util/string.c
-util/symbol_fprintf.c
-util/units.c
-util/affinity.c
-util/rwsem.c
-util/hashmap.c
-util/perf_regs.c
-util/fncache.c
-util/rlimit.c
-util/perf-regs-arch/perf_regs_aarch64.c
-util/perf-regs-arch/perf_regs_arm.c
-util/perf-regs-arch/perf_regs_csky.c
-util/perf-regs-arch/perf_regs_loongarch.c
-util/perf-regs-arch/perf_regs_mips.c
-util/perf-regs-arch/perf_regs_powerpc.c
-util/perf-regs-arch/perf_regs_riscv.c
-util/perf-regs-arch/perf_regs_s390.c
-util/perf-regs-arch/perf_regs_x86.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 0aeb97c11c03..3be882b2e845 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -10,21 +10,19 @@
#endif
#include <perf/mmap.h>
#include "evlist.h"
-#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
-#include "stat.h"
-#include "metricgroup.h"
#include "util/bpf-filter.h"
#include "util/env.h"
-#include "util/pmu.h"
-#include "util/pmus.h"
+#include "util/kvm-stat.h"
+#include "util/kwork.h"
+#include "util/lock-contention.h"
#include <internal/lib.h>
-#include "util.h"
+#include "../builtin.h"
#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
@@ -50,166 +48,6 @@
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif
-/*
- * Avoid bringing in event parsing.
- */
-int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
-{
- return 0;
-}
-
-/*
- * Provide these two so that we don't have to link against callchain.c and
- * start dragging hist.c, etc.
- */
-struct callchain_param callchain_param;
-
-int parse_callchain_record(const char *arg __maybe_unused,
- struct callchain_param *param __maybe_unused)
-{
- return 0;
-}
-
-/*
- * Add these not to drag util/env.c
- */
-struct perf_env perf_env;
-
-const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
-{
- return NULL;
-}
-
-// This one is a bit easier, wouldn't drag too much, but leave it as a stub we need it here
-const char *perf_env__arch(struct perf_env *env __maybe_unused)
-{
- return NULL;
-}
-
-/*
- * These ones are needed not to drag the PMU bandwagon, jevents generated
- * pmu_sys_event_tables, etc and evsel__find_pmu() is used so far just for
- * doing per PMU perf_event_attr.exclude_guest handling, not really needed, so
- * far, for the perf python binding known usecases, revisit if this become
- * necessary.
- */
-struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
-{
- return NULL;
-}
-
-int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt, ...)
-{
- return EOF;
-}
-
-const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused)
-{
- return NULL;
-}
-
-struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused)
-{
- return NULL;
-}
-
-int perf_pmus__num_core_pmus(void)
-{
- return 1;
-}
-
-bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
-{
- return false;
-}
-
-bool perf_pmus__supports_extended_type(void)
-{
- return false;
-}
-
-/*
- * Add this one here not to drag util/metricgroup.c
- */
-int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
- struct rblist *new_metric_events,
- struct rblist *old_metric_events)
-{
- return 0;
-}
-
-/*
- * Add this one here not to drag util/trace-event-info.c
- */
-char *tracepoint_id_to_name(u64 config)
-{
- return NULL;
-}
-
-/*
- * XXX: All these evsel destructors need some better mechanism, like a linked
- * list of destructors registered when the relevant code indeed is used instead
- * of having more and more calls in perf_evsel__delete(). -- acme
- *
- * For now, add some more:
- *
- * Not to drag the BPF bandwagon...
- */
-void bpf_counter__destroy(struct evsel *evsel);
-int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
-int bpf_counter__disable(struct evsel *evsel);
-
-void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
-{
-}
-
-int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
-{
- return 0;
-}
-
-int bpf_counter__disable(struct evsel *evsel __maybe_unused)
-{
- return 0;
-}
-
-// not to drag util/bpf-filter.c
-#ifdef HAVE_BPF_SKEL
-int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
-{
- return 0;
-}
-
-int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
-{
- return 0;
-}
-#endif
-
-/*
- * Support debug printing even though util/debug.c is not linked. That means
- * implementing 'verbose' and 'eprintf'.
- */
-int verbose;
-int debug_kmaps;
-int debug_peo_args;
-
-int eprintf(int level, int var, const char *fmt, ...);
-
-int eprintf(int level, int var, const char *fmt, ...)
-{
- va_list args;
- int ret = 0;
-
- if (var >= level) {
- va_start(args, fmt);
- ret = vfprintf(stderr, fmt, args);
- va_end(args);
- }
-
- return ret;
-}
-
/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
@@ -1510,15 +1348,102 @@ error:
#endif
}
-/*
- * Dummy, to avoid dragging all the test_attr infrastructure in the python
- * binding.
- */
-void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
- int fd, int group_fd, unsigned long flags)
+
+/* The following are stubs to avoid dragging in builtin-* objects. */
+/* TODO: move the code out of the builtin-* file into util. */
+
+unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
+
+bool kvm_entry_event(struct evsel *evsel __maybe_unused)
{
+ return false;
+}
+
+bool kvm_exit_event(struct evsel *evsel __maybe_unused)
+{
+ return false;
+}
+
+bool exit_event_begin(struct evsel *evsel __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return false;
+}
+
+bool exit_event_end(struct evsel *evsel __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return false;
+}
+
+void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key __maybe_unused,
+ char *decode __maybe_unused)
+{
+}
+
+int find_scripts(char **scripts_array __maybe_unused, char **scripts_path_array __maybe_unused,
+ int num __maybe_unused, int pathlen __maybe_unused)
+{
+ return -1;
+}
+
+void perf_stat__set_no_csv_summary(int set __maybe_unused)
+{
+}
+
+void perf_stat__set_big_num(int set __maybe_unused)
+{
+}
+
+int script_spec_register(const char *spec __maybe_unused, struct scripting_ops *ops __maybe_unused)
+{
+ return -1;
+}
+
+arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch __maybe_unused)
+{
+ return NULL;
+}
+
+struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork __maybe_unused,
+ struct kwork_class *class __maybe_unused,
+ struct kwork_work *key __maybe_unused)
+{
+ return NULL;
+}
+
+void script_fetch_insn(struct perf_sample *sample __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+}
+
+int perf_sample__sprintf_flags(u32 flags __maybe_unused, char *str __maybe_unused,
+ size_t sz __maybe_unused)
+{
+ return -1;
+}
+
+bool match_callstack_filter(struct machine *machine __maybe_unused, u64 *callstack __maybe_unused)
+{
+ return false;
+}
+
+struct lock_stat *lock_stat_find(u64 addr __maybe_unused)
+{
+ return NULL;
+}
+
+struct lock_stat *lock_stat_findnew(u64 addr __maybe_unused, const char *name __maybe_unused,
+ int flags __maybe_unused)
+{
+ return NULL;
}
-void evlist__free_stats(struct evlist *evlist)
+int cmd_inject(int argc __maybe_unused, const char *argv[] __maybe_unused)
{
+ return -1;
}
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 586b94e90f4e..2282fe3772f3 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,7 +1,7 @@
ifeq ($(CONFIG_LIBTRACEEVENT),y)
- perf-$(CONFIG_LIBPERL) += trace-event-perl.o
+ perf-util-$(CONFIG_LIBPERL) += trace-event-perl.o
endif
-perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+perf-util-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index a10343b9dcd4..5596bed1b8c8 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2050,6 +2050,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session)
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
+ struct ui_progress prog;
union perf_event *event;
uint32_t size, cur_size = 0;
void *buf = NULL;
@@ -2057,9 +2058,21 @@ static int __perf_session__process_pipe_events(struct perf_session *session)
u64 head;
ssize_t err;
void *p;
+ bool update_prog = false;
perf_tool__fill_defaults(tool);
+ /*
+	 * If the pipe data was saved to a file (by redirection), the path will
+	 * be something other than "-".  In that case the total size is known
+	 * and we can show progress.
+ */
+ if (strcmp(session->data->path, "-") && session->data->file.size) {
+ ui_progress__init_size(&prog, session->data->file.size,
+ "Processing events...");
+ update_prog = true;
+ }
+
head = 0;
cur_size = sizeof(union perf_event);
@@ -2131,6 +2144,9 @@ more:
if (err)
goto out_err;
+ if (update_prog)
+ ui_progress__update(&prog, size);
+
if (!session_done())
goto more;
done:
@@ -2144,6 +2160,8 @@ done:
err = perf_session__flush_thread_stacks(session);
out_err:
free(buf);
+ if (update_prog)
+ ui_progress__finish();
if (!tool->no_warn)
perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
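
The progress bar added above is only armed when the pipe data was redirected into a regular file, since that is the only case where a total size is known up front; live stdin ("-") keeps the old behaviour. A tiny standalone sketch of that guard (hypothetical helper name, not perf code):

#include <stdio.h>
#include <string.h>

/* Illustrative: progress can only be sized for a real file, not for stdin ("-"). */
static int can_show_progress(const char *path, unsigned long long file_size)
{
	return strcmp(path, "-") != 0 && file_size != 0;
}

int main(void)
{
	printf("%d\n", can_show_progress("perf.data", 4096)); /* 1: redirected pipe data */
	printf("%d\n", can_show_progress("-", 0));            /* 0: live pipe from stdin */
	return 0;
}
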
@@ -2523,7 +2541,7 @@ static int __perf_session__process_dir_events(struct perf_session *session)
perf_tool__fill_defaults(tool);
- ui_progress__init_size(&prog, total_size, "Sorting events...");
+ ui_progress__init_size(&prog, total_size, "Processing events...");
nr_readers = 1;
for (i = 0; i < data->dir.nr; i++) {
@@ -2696,8 +2714,7 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp
return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
-size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
- bool skip_empty)
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
size_t ret;
const char *msg = "";
@@ -2707,7 +2724,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
- ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
+ ret += events_stats__fprintf(&session->evlist->stats, fp);
return ret;
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3b0256e977a6..4c29dc86956f 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -130,8 +130,7 @@ size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp);
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
bool (fn)(struct dso *dso, int parm), int parm);
-size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
- bool skip_empty);
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
void perf_session__dump_kmaps(struct perf_session *session);
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 3107f5aa8c9a..142e9d447ce7 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -60,7 +60,7 @@ class install_lib(_install_lib):
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls', '-DPYTHON_PERF' ]
+cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
if cc_is_clang:
cflags += ["-Wno-unused-command-line-argument" ]
else:
@@ -72,36 +72,11 @@ cflags += [ "-Wno-declaration-after-statement" ]
src_perf = getenv('srctree') + '/tools/perf'
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
-libtraceevent = getenv('LIBTRACEEVENT')
-libapikfs = getenv('LIBAPI')
-libperf = getenv('LIBPERF')
-
-ext_sources = [f.strip() for f in open('util/python-ext-sources')
- if len(f.strip()) > 0 and f[0] != '#']
-
-extra_libraries = []
-
-if '-DHAVE_LIBTRACEEVENT' in cflags:
- extra_libraries += [ 'traceevent' ]
-else:
- ext_sources.remove('util/trace-event.c')
- ext_sources.remove('util/trace-event-parse.c')
-
-# use full paths with source files
-ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources))
-
-if '-DHAVE_LIBNUMA_SUPPORT' in cflags:
- extra_libraries += [ 'numa' ]
-if '-DHAVE_LIBCAP_SUPPORT' in cflags:
- extra_libraries += [ 'cap' ]
perf = Extension('perf',
- sources = ext_sources,
- include_dirs = ['util/include'],
- libraries = extra_libraries,
- extra_compile_args = cflags,
- extra_objects = [ x for x in [libtraceevent, libapikfs, libperf]
- if x is not None],
+ sources = [ src_perf + '/util/python.c' ],
+ include_dirs = ['util/include'],
+ extra_compile_args = cflags,
)
setup(name='perf',
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index cd39ea972193..ab7c7ff35f9b 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -334,7 +334,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
* comparing symbol address alone is not enough since it's a
* relative address within a dso.
*/
- if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
+ if (!hists__has(left->hists, dso)) {
ret = sort__dso_cmp(left, right);
if (ret != 0)
return ret;
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 9d670d8c1c08..760742fd4a7d 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -39,7 +39,7 @@ static const char *srcline_dso_name(struct dso *dso)
if (dso_name[0] == '[')
return NULL;
- if (!strncmp(dso_name, "/tmp/perf-", 10))
+ if (is_perf_pid_map_name(dso_name))
return NULL;
return dso_name;
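
is_perf_pid_map_name() is introduced elsewhere in this series and is assumed here to tighten the old '/tmp/perf-' prefix check to names of the exact '/tmp/perf-<pid>.map' form. A hypothetical stand-in showing that kind of check (the real helper may differ):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in: accept only names shaped like "/tmp/perf-<pid>.map". */
static bool looks_like_perf_pid_map(const char *name)
{
	int pid, consumed = 0;

	return sscanf(name, "/tmp/perf-%d.map%n", &pid, &consumed) == 1 &&
	       consumed == (int)strlen(name);
}

int main(void)
{
	printf("%d\n", looks_like_perf_pid_map("/tmp/perf-1234.map")); /* 1 */
	printf("%d\n", looks_like_perf_pid_map("/tmp/perf-extra"));    /* 0 */
	return 0;
}
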
@@ -288,7 +288,7 @@ static int inline_list__append_dso_a2l(struct dso *dso,
struct inline_node *node,
struct symbol *sym)
{
- struct a2l_data *a2l = dso->a2l;
+ struct a2l_data *a2l = dso__a2l(dso);
struct symbol *inline_sym = new_inline_sym(dso, sym, a2l->funcname);
char *srcline = NULL;
@@ -304,11 +304,11 @@ static int addr2line(const char *dso_name, u64 addr,
struct symbol *sym)
{
int ret = 0;
- struct a2l_data *a2l = dso->a2l;
+ struct a2l_data *a2l = dso__a2l(dso);
if (!a2l) {
- dso->a2l = addr2line_init(dso_name);
- a2l = dso->a2l;
+ a2l = addr2line_init(dso_name);
+ dso__set_a2l(dso, a2l);
}
if (a2l == NULL) {
@@ -360,14 +360,14 @@ static int addr2line(const char *dso_name, u64 addr,
void dso__free_a2l(struct dso *dso)
{
- struct a2l_data *a2l = dso->a2l;
+ struct a2l_data *a2l = dso__a2l(dso);
if (!a2l)
return;
addr2line_cleanup(a2l);
- dso->a2l = NULL;
+ dso__set_a2l(dso, NULL);
}
#else /* HAVE_LIBBFD_SUPPORT */
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 91d2f7f65df7..c38bcb6f4c78 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -38,6 +38,7 @@
static int aggr_header_lens[] = {
[AGGR_CORE] = 18,
[AGGR_CACHE] = 22,
+ [AGGR_CLUSTER] = 20,
[AGGR_DIE] = 12,
[AGGR_SOCKET] = 6,
[AGGR_NODE] = 6,
@@ -49,6 +50,7 @@ static int aggr_header_lens[] = {
static const char *aggr_header_csv[] = {
[AGGR_CORE] = "core,cpus,",
[AGGR_CACHE] = "cache,cpus,",
+ [AGGR_CLUSTER] = "cluster,cpus,",
[AGGR_DIE] = "die,cpus,",
[AGGR_SOCKET] = "socket,cpus,",
[AGGR_NONE] = "cpu,",
@@ -60,6 +62,7 @@ static const char *aggr_header_csv[] = {
static const char *aggr_header_std[] = {
[AGGR_CORE] = "core",
[AGGR_CACHE] = "cache",
+ [AGGR_CLUSTER] = "cluster",
[AGGR_DIE] = "die",
[AGGR_SOCKET] = "socket",
[AGGR_NONE] = "cpu",
@@ -1183,10 +1186,21 @@ static void print_metric_headers_std(struct perf_stat_config *config,
static void print_metric_headers_csv(struct perf_stat_config *config,
bool no_indent __maybe_unused)
{
+ const char *p;
+
if (config->interval)
- fputs("time,", config->output);
- if (!config->iostat_run)
- fputs(aggr_header_csv[config->aggr_mode], config->output);
+ fprintf(config->output, "time%s", config->csv_sep);
+ if (config->iostat_run)
+ return;
+
+ p = aggr_header_csv[config->aggr_mode];
+ while (*p) {
+ if (*p == ',')
+ fputs(config->csv_sep, config->output);
+ else
+ fputc(*p, config->output);
+ p++;
+ }
}
static void print_metric_headers_json(struct perf_stat_config *config __maybe_unused,
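
The rewritten print_metric_headers_csv() above emits the aggregation header template character by character, substituting config->csv_sep for every ',', so a user-chosen separator (e.g. 'perf stat -x ;') also applies to the header row. A standalone sketch of that substitution (illustrative only):

#include <stdio.h>

/* Illustrative only: replace ',' in a header template with a custom separator. */
static void print_csv_header(const char *tmpl, const char *sep, FILE *out)
{
	for (const char *p = tmpl; *p; p++) {
		if (*p == ',')
			fputs(sep, out);
		else
			fputc(*p, out);
	}
}

int main(void)
{
	print_csv_header("cluster,cpus,", ";", stdout);	/* prints "cluster;cpus;" */
	fputc('\n', stdout);
	return 0;
}
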
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 3466aa952442..6bb975e46de3 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -176,6 +176,13 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type
if (type != evsel__stat_type(cur))
continue;
+ /*
+	 * Except for the SW CLOCK events,
+	 * ignore counters that are not from the PMU we're looking for.
+ */
+ if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu))
+ continue;
+
aggr = &cur->stats->aggr[aggr_idx];
if (type == STAT_NSECS)
return aggr->counts.val;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 9e5940b5bc59..19eb623e0826 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1607,7 +1607,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
if (!bfd_check_format(abfd, bfd_object)) {
pr_debug2("%s: cannot read %s bfd file.\n", __func__,
- dso->long_name);
+ dso__long_name(dso));
goto out_close;
}
@@ -1640,12 +1640,13 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
}
if (i < symbols_count) {
/* PE symbols can only have 4 bytes, so use .text high bits */
- dso->text_offset = section->vma - (u32)section->vma;
- dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
- dso->text_end = (section->vma - dso->text_offset) + section->size;
+ u64 text_offset = (section->vma - (u32)section->vma)
+ + (u32)bfd_asymbol_value(symbols[i]);
+ dso__set_text_offset(dso, text_offset);
+ dso__set_text_end(dso, (section->vma - text_offset) + section->size);
} else {
- dso->text_offset = section->vma - section->filepos;
- dso->text_end = section->filepos + section->size;
+ dso__set_text_offset(dso, section->vma - section->filepos);
+ dso__set_text_end(dso, section->filepos + section->size);
}
}
@@ -1671,7 +1672,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
else
len = section->size - sym->value;
- start = bfd_asymbol_value(sym) - dso->text_offset;
+ start = bfd_asymbol_value(sym) - dso__text_offset(dso);
symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
bfd_asymbol_name(sym));
if (!symbol)
@@ -1799,7 +1800,8 @@ int dso__load(struct dso *dso, struct map *map)
const char *map_path = dso__long_name(dso);
mutex_lock(dso__lock(dso));
- perfmap = strncmp(dso__name(dso), "/tmp/perf-", 10) == 0;
+ perfmap = is_perf_pid_map_name(map_path);
+
if (perfmap) {
if (dso__nsinfo(dso) &&
(dso__find_perf_map(newmapname, sizeof(newmapname),
@@ -1816,10 +1818,7 @@ int dso__load(struct dso *dso, struct map *map)
goto out;
}
- kmod = dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
- dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
- dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE ||
- dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
+ kmod = dso__is_kmod(dso);
if (dso__kernel(dso) && !kmod) {
if (dso__kernel(dso) == DSO_SPACE__KERNEL)
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index c114bbceef40..657cfa5af43c 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -46,7 +46,8 @@ struct symbol_conf {
lazy_load_kernel_maps,
keep_exited_threads,
annotate_data_member,
- annotate_data_sample;
+ annotate_data_sample,
+ skip_empty;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 63be7b58761d..0dd26b991b3f 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -123,6 +123,13 @@ int syscalltbl__id(struct syscalltbl *tbl, const char *name)
return sc ? sc->id : -1;
}
+int syscalltbl__id_at_idx(struct syscalltbl *tbl, int idx)
+{
+ struct syscall *syscalls = tbl->syscalls.entries;
+
+ return idx < tbl->syscalls.nr_entries ? syscalls[idx].id : -1;
+}
+
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
{
int i;
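
syscalltbl__id_at_idx() returns the syscall id stored at a given position of the sorted table, or -1 once the index runs past the end. A hypothetical caller (not taken from this patch) walking every entry could look like this, assuming a table already created with syscalltbl__new():

/* Hypothetical usage sketch; include path and types follow the perf tree. */
#include <stdio.h>
#include "util/syscalltbl.h"

static void list_syscalls(struct syscalltbl *tbl)
{
	int idx = 0, id;

	while ((id = syscalltbl__id_at_idx(tbl, idx++)) != -1)
		printf("%d %s\n", id, syscalltbl__name(tbl, id));
}
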
diff --git a/tools/perf/util/syscalltbl.h b/tools/perf/util/syscalltbl.h
index a41d2ca9e4ae..2b53b7ed25a6 100644
--- a/tools/perf/util/syscalltbl.h
+++ b/tools/perf/util/syscalltbl.h
@@ -16,6 +16,7 @@ void syscalltbl__delete(struct syscalltbl *tbl);
const char *syscalltbl__name(const struct syscalltbl *tbl, int id);
int syscalltbl__id(struct syscalltbl *tbl, const char *name);
+int syscalltbl__id_at_idx(struct syscalltbl *tbl, int idx);
int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx);
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx);
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index b38d322734b4..bde216e630d2 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -29,8 +29,8 @@ static int __find_debuginfo(Dwfl_Module *mod __maybe_unused, void **userdata,
const struct dso *dso = *userdata;
assert(dso);
- if (dso->symsrc_filename && strcmp (file_name, dso->symsrc_filename))
- *debuginfo_file_name = strdup(dso->symsrc_filename);
+ if (dso__symsrc_filename(dso) && strcmp(file_name, dso__symsrc_filename(dso)))
+ *debuginfo_file_name = strdup(dso__symsrc_filename(dso));
return -1;
}
@@ -66,7 +66,7 @@ static int __report_module(struct addr_location *al, u64 ip,
* a different code in another DSO. So just use the map->start
* directly to pick the correct one.
*/
- if (!strncmp(dso->long_name, "/tmp/jitted-", 12))
+ if (!strncmp(dso__long_name(dso), "/tmp/jitted-", 12))
base = map__start(al->map);
else
base = map__start(al->map) - map__pgoff(al->map);
@@ -83,15 +83,15 @@ static int __report_module(struct addr_location *al, u64 ip,
if (!mod) {
char filename[PATH_MAX];
- __symbol__join_symfs(filename, sizeof(filename), dso->long_name);
- mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
+ __symbol__join_symfs(filename, sizeof(filename), dso__long_name(dso));
+ mod = dwfl_report_elf(ui->dwfl, dso__short_name(dso), filename, -1,
base, false);
}
if (!mod) {
char filename[PATH_MAX];
if (dso__build_id_filename(dso, filename, sizeof(filename), false))
- mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
+ mod = dwfl_report_elf(ui->dwfl, dso__short_name(dso), filename, -1,
base, false);
}
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index cde267ea3e99..f6a6f6a91030 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -363,7 +363,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
struct machine *machine, u64 *offset)
{
int fd;
- u64 ofs = dso->data.debug_frame_offset;
+ u64 ofs = dso__data(dso)->debug_frame_offset;
/* debug_frame can reside in:
* - dso
@@ -379,7 +379,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
}
if (ofs <= 0) {
- fd = open(dso->symsrc_filename, O_RDONLY);
+ fd = open(dso__symsrc_filename(dso), O_RDONLY);
if (fd >= 0) {
ofs = elf_section_offset(fd, ".debug_frame");
close(fd);
@@ -390,6 +390,11 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
char *debuglink = malloc(PATH_MAX);
int ret = 0;
+ if (debuglink == NULL) {
+ pr_err("unwind: Can't read unwind spec debug frame.\n");
+ return -ENOMEM;
+ }
+
ret = dso__read_binary_type_filename(
dso, DSO_BINARY_TYPE__DEBUGLINK,
machine->root_dir, debuglink, PATH_MAX);
@@ -402,21 +407,21 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
}
}
if (ofs > 0) {
- if (dso->symsrc_filename != NULL) {
+ if (dso__symsrc_filename(dso) != NULL) {
pr_warning(
"%s: overwrite symsrc(%s,%s)\n",
__func__,
- dso->symsrc_filename,
+ dso__symsrc_filename(dso),
debuglink);
- zfree(&dso->symsrc_filename);
+ zfree(&dso__symsrc_filename(dso));
}
- dso->symsrc_filename = debuglink;
+ dso__set_symsrc_filename(dso, debuglink);
} else {
free(debuglink);
}
}
- dso->data.debug_frame_offset = ofs;
+ dso__data(dso)->debug_frame_offset = ofs;
}
*offset = ofs;
@@ -481,7 +486,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
if (ret < 0 &&
!read_unwind_spec_debug_frame(dso, ui->machine, &segbase)) {
int fd = dso__data_get_fd(dso, ui->machine);
- int is_exec = elf_is_exec(fd, dso->name);
+ int is_exec = elf_is_exec(fd, dso__name(dso));
u64 start = map__start(map);
unw_word_t base = is_exec ? 0 : start;
const char *symfile;
@@ -489,7 +494,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
if (fd >= 0)
dso__data_put_fd(dso);
- symfile = dso->symsrc_filename ?: dso->name;
+ symfile = dso__symsrc_filename(dso) ?: dso__name(dso);
memset(&di, 0, sizeof(di));
if (dwarf_find_debug_frame(0, &di, ip, base, symfile, start, map__end(map)))
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index b53753dee02f..6c02f401069e 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -67,6 +67,7 @@ LANGUAGES = de fr it cs pt ka
bindir ?= /usr/bin
sbindir ?= /usr/sbin
mandir ?= /usr/man
+libdir ?= /usr/lib
includedir ?= /usr/include
localedir ?= /usr/share/locale
docdir ?= /usr/share/doc/packages/cpupower
@@ -94,15 +95,6 @@ RANLIB = $(CROSS)ranlib
HOSTCC = gcc
MKDIR = mkdir
-# 64bit library detection
-include ../../scripts/Makefile.arch
-
-ifeq ($(IS_64_BIT), 1)
-libdir ?= /usr/lib64
-else
-libdir ?= /usr/lib
-endif
-
# Now we set up the build system
#
@@ -332,4 +324,39 @@ uninstall:
rm -f $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
done;
-.PHONY: all utils libcpupower update-po create-gmo install-lib install-tools install-man install-gmo install uninstall clean
+help:
+ @echo 'Building targets:'
+ @echo ' all - Default target. Could be omitted. Put build artifacts'
+ @echo ' to "O" cmdline option dir (default: current dir)'
+ @echo ' install - Install previously built project files from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or'
+ @echo ' Makefile config block option (default: "")'
+ @echo ' install-lib - Install previously built library binary from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' and library headers from "lib/" for userspace to the install'
+ @echo ' dir defined by "DESTDIR" cmdline (default: "")'
+ @echo ' install-tools - Install previously built "cpupower" util from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir) and'
+ @echo ' "cpupower-completion.sh" script from the src dir to the'
+ @echo ' install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-man - Install man pages from the "man" src subdir to the'
+ @echo ' install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-gmo - Install previously built language files from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-bench - Install previously built "cpufreq-bench" util files from the'
+ @echo ' output dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ''
+ @echo 'Cleaning targets:'
+ @echo ' clean - Clean build artifacts from the dir defined by "O" cmdline'
+ @echo ' option (default: current dir)'
+ @echo ' uninstall - Remove previously installed files from the dir defined by "DESTDIR"'
+ @echo ' cmdline or Makefile config block option (default: "")'
+
+.PHONY: all utils libcpupower update-po create-gmo install-lib install-tools install-man install-gmo install uninstall clean help
diff --git a/tools/power/cpupower/README b/tools/power/cpupower/README
index 1c68f47663b2..2678ed81d311 100644
--- a/tools/power/cpupower/README
+++ b/tools/power/cpupower/README
@@ -22,16 +22,156 @@ interfaces [depending on configuration, see below].
compilation and installation
----------------------------
-make
-su
-make install
-
-should suffice on most systems. It builds libcpupower to put in
-/usr/lib; cpupower, cpufreq-bench_plot.sh to put in /usr/bin; and
-cpufreq-bench to put in /usr/sbin. If you want to set up the paths
-differently and/or want to configure the package to your specific
-needs, you need to open "Makefile" with an editor of your choice and
-edit the block marked CONFIGURATION.
+There are two output directories: one for the build output and another for
+the installation of the build results, that is, the utility, library,
+man pages, etc.
+
+default directory
+-----------------
+
+In the default case, the build and install process requires no
+additional parameters:
+
+build
+-----
+
+$ make
+
+The output directory for the 'make' command is the current directory and
+its subdirs in the kernel tree:
+tools/power/cpupower
+
+install
+-------
+
+$ sudo make install
+
+The 'make install' command puts the targets into the default system dirs:
+
+-----------------------------------------------------------------------
+| Installing file | System dir |
+-----------------------------------------------------------------------
+| libcpupower | /usr/lib |
+-----------------------------------------------------------------------
+| cpupower | /usr/bin |
+-----------------------------------------------------------------------
+| cpufreq-bench_plot.sh | /usr/bin |
+-----------------------------------------------------------------------
+| man pages | /usr/man |
+-----------------------------------------------------------------------
+
+In other words, it makes the build results available system-wide,
+enabling any user to simply start using them without any additional steps.
+
+custom directory
+----------------
+
+There are two make command-line variables, 'O' and 'DESTDIR', that set up
+the appropriate dirs:
+'O' - build directory
+'DESTDIR' - installation directory. This variable can also be set up in
+the 'CONFIGURATION' block of the "Makefile".
+
+build
+-----
+
+$ make O=<your_custom_build_catalog>
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build
+
+install
+-------
+
+$ make O=<your_custom_build_catalog> DESTDIR=<your_custom_install_catalog>
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build DESTDIR=/home/hedin/prj/cpupower \
+> install
+
+Notice that both variables 'O' and 'DESTDIR' have been provided. The reason
+is that the build results are saved in the custom output dir defined by the
+'O' variable, so this dir is the source for the installation step. If only
+'DESTDIR' were provided, the 'install' target would assume that the build
+directory is the current one, build everything there and install from the
+current dir.
+
+The files will be installed to the following dirs:
+
+-----------------------------------------------------------------------
+| Installing file | System dir |
+-----------------------------------------------------------------------
+| libcpupower | ${DESTDIR}/usr/lib |
+-----------------------------------------------------------------------
+| cpupower | ${DESTDIR}/usr/bin |
+-----------------------------------------------------------------------
+| cpufreq-bench_plot.sh | ${DESTDIR}/usr/bin |
+-----------------------------------------------------------------------
+| man pages | ${DESTDIR}/usr/man |
+-----------------------------------------------------------------------
+
+If you look at the table for the default 'make' output dirs you will
+notice that the only difference with the non-default case is the
+${DESTDIR} prefix. So, the structure of the output dirs remains the same
+regardless of the root output directory.
+
+
+clean and uninstall
+-------------------
+
+The 'clean' target removes build results from the build catalog.
+The 'uninstall' target removes previously installed files from the
+installation directory.
+
+default directory
+-----------------
+
+This case is a straightforward one:
+$ make clean
+$ make uninstall
+
+custom directory
+----------------
+
+Use the 'O' command-line variable to remove previously built files from the
+build dir:
+$ make O=<your_custom_build_catalog> clean
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build clean
+
+Use the 'DESTDIR' command-line variable to uninstall previously installed
+files from the given dir:
+$ make DESTDIR=<your_custom_install_catalog> uninstall
+
+Example:
+$ make DESTDIR=/home/hedin/prj/cpupower uninstall
+
+
+running the tool
+----------------
+
+default directory
+-----------------
+
+$ sudo cpupower
+
+custom directory
+----------------
+
+When it comes to running the utility from a custom install catalog, things
+become a little more complicated, as the 'just run' approach doesn't work.
+Assuming that the current dir is '<your_custom_install_catalog>/usr',
+issuing the following command:
+
+$ sudo ./bin/cpupower
+will produce the following error output:
+./bin/cpupower: error while loading shared libraries: libcpupower.so.1:
+cannot open shared object file: No such file or directory
+
+The issue is that the binary cannot find the 'libcpupower' library, so we
+need to point it at the lib dir:
+$ sudo LD_LIBRARY_PATH=lib64/ ./bin/cpupower
THANKS
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index a4b902f9e1c4..34e5894476eb 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -1,4 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+ifeq ($(MAKELEVEL),0)
+$(error This Makefile is not intended to be run standalone, but only as a part \
+of the main one in the parent dir)
+endif
+
OUTPUT := ./
ifeq ("$(origin O)", "command line")
ifneq ($(O),)
diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1
index 8ee737eefa5c..89af019f8dc4 100644
--- a/tools/power/cpupower/man/cpupower-monitor.1
+++ b/tools/power/cpupower/man/cpupower-monitor.1
@@ -81,11 +81,6 @@ Measure idle and frequency characteristics of an arbitrary command/workload.
The executable \fBcommand\fP is forked and upon its exit, statistics gathered since it was
forked are displayed.
.RE
-.PP
-\-v
-.RS 4
-Increase verbosity if the binary was compiled with the DEBUG option set.
-.RE
.SH MONITOR DESCRIPTIONS
.SS "Idle_Stats"
@@ -172,9 +167,11 @@ displayed.
"BIOS and Kernel Developer’s Guide (BKDG) for AMD Family 14h Processors"
https://support.amd.com/us/Processor_TechDocs/43170.pdf
-"Intel® Turbo Boost Technology
-in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
-http://download.intel.com/design/processor/applnots/320354.pdf
+"What Is Intel® Turbo Boost Technology?"
+https://www.intel.com/content/www/us/en/gaming/resources/turbo-boost.html
+
+"Power Management - Technology Overview"
+https://cdrdv2.intel.com/v1/dl/getContent/637748
"Intel® 64 and IA-32 Architectures Software Developer's Manual
Volume 3B: System Programming Guide"
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index 075e766ff1f3..f746099b5dac 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -35,7 +35,7 @@ static unsigned int avail_monitors;
static char *progname;
enum operation_mode_e { list = 1, show, show_all };
-static int mode;
+static enum operation_mode_e mode;
static int interval = 1;
static char *show_monitors_param;
static struct cpupower_topology cpu_top;
diff --git a/tools/power/pm-graph/bootgraph.py b/tools/power/pm-graph/bootgraph.py
index f96f50e0c336..8a3ef94fe88f 100755
--- a/tools/power/pm-graph/bootgraph.py
+++ b/tools/power/pm-graph/bootgraph.py
@@ -77,12 +77,12 @@ class SystemValues(aslib.SystemValues):
fp.close()
self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S')
def kernelVersion(self, msg):
- m = re.match('^[Ll]inux *[Vv]ersion *(?P<v>\S*) .*', msg)
+ m = re.match(r'^[Ll]inux *[Vv]ersion *(?P<v>\S*) .*', msg)
if m:
return m.group('v')
return 'unknown'
def checkFtraceKernelVersion(self):
- m = re.match('^(?P<x>[0-9]*)\.(?P<y>[0-9]*)\.(?P<z>[0-9]*).*', self.kernel)
+ m = re.match(r'^(?P<x>[0-9]*)\.(?P<y>[0-9]*)\.(?P<z>[0-9]*).*', self.kernel)
if m:
val = tuple(map(int, m.groups()))
if val >= (4, 10, 0):
@@ -324,7 +324,7 @@ def parseKernelLog():
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
@@ -332,24 +332,24 @@ def parseKernelLog():
break
msg = m.group('msg')
data.dmesgtext.append(line)
- if(ktime == 0.0 and re.match('^Linux version .*', msg)):
+ if(ktime == 0.0 and re.match(r'^Linux version .*', msg)):
if(not sysvals.stamp['kernel']):
sysvals.stamp['kernel'] = sysvals.kernelVersion(msg)
continue
- m = re.match('.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
+ m = re.match(r'.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
if(m):
bt = datetime.strptime(m.group('d')+' '+m.group('t'), '%Y-%m-%d %H:%M:%S')
bt = bt - timedelta(seconds=int(ktime))
data.boottime = bt.strftime('%Y-%m-%d_%H:%M:%S')
sysvals.stamp['time'] = bt.strftime('%B %d %Y, %I:%M:%S %p')
continue
- m = re.match('^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
+ m = re.match(r'^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
if(m):
func = m.group('f')
pid = int(m.group('p'))
devtemp[func] = (ktime, pid)
continue
- m = re.match('^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
+ m = re.match(r'^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
if(m):
data.valid = True
data.end = ktime
@@ -359,7 +359,7 @@ def parseKernelLog():
data.newAction(phase, f, pid, start, ktime, int(r), int(t))
del devtemp[f]
continue
- if(re.match('^Freeing unused kernel .*', msg)):
+ if(re.match(r'^Freeing unused kernel .*', msg)):
data.tUserMode = ktime
data.dmesg['kernel']['end'] = ktime
data.dmesg['user']['start'] = ktime
diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
index 40ad221e8881..ef87e63c05c7 100755
--- a/tools/power/pm-graph/sleepgraph.py
+++ b/tools/power/pm-graph/sleepgraph.py
@@ -86,7 +86,7 @@ def ascii(text):
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
- version = '5.11'
+ version = '5.12'
ansi = False
rs = 0
display = ''
@@ -420,11 +420,11 @@ class SystemValues:
return value.format(**args)
def setOutputFile(self):
if self.dmesgfile != '':
- m = re.match('(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
+ m = re.match(r'(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
- m = re.match('(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
+ m = re.match(r'(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
@@ -464,15 +464,15 @@ class SystemValues:
if os.path.exists('/proc/cpuinfo'):
with open('/proc/cpuinfo', 'r') as fp:
for line in fp:
- if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
+ if re.match(r'^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
if os.path.exists('/proc/meminfo'):
with open('/proc/meminfo', 'r') as fp:
for line in fp:
- m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
+ m = re.match(r'^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
- m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
+ m = re.match(r'^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memfree = int(m.group('sz'))
if os.path.exists('/etc/os-release'):
@@ -539,7 +539,7 @@ class SystemValues:
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
break
@@ -553,7 +553,7 @@ class SystemValues:
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
@@ -636,11 +636,11 @@ class SystemValues:
# now process the args
for arg in sorted(args):
arglist[arg] = ''
- m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
+ m = re.match(r'.* '+arg+'=(?P<arg>.*) ', data);
if m:
arglist[arg] = m.group('arg')
else:
- m = re.match('.* '+arg+'=(?P<arg>.*)', data);
+ m = re.match(r'.* '+arg+'=(?P<arg>.*)', data);
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
@@ -989,7 +989,7 @@ class SystemValues:
m = re.match(tp.ftrace_line_fmt, line)
if(not m or 'device_pm_callback_start' not in line):
continue
- m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
+ m = re.match(r'.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
if(not m):
continue
dev = m.group('d')
@@ -999,7 +999,7 @@ class SystemValues:
# now get the syspath for each target device
for dirname, dirnames, filenames in os.walk('/sys/devices'):
- if(re.match('.*/power', dirname) and 'async' in filenames):
+ if(re.match(r'.*/power', dirname) and 'async' in filenames):
dev = dirname.split('/')[-2]
if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
props[dev].syspath = dirname[:-6]
@@ -1143,12 +1143,12 @@ class SystemValues:
elif value and os.path.exists(file):
fp = open(file, 'r+')
if fmt == 'radio':
- m = re.match('.*\[(?P<v>.*)\].*', fp.read())
+ m = re.match(r'.*\[(?P<v>.*)\].*', fp.read())
if m:
self.cfgdef[file] = m.group('v')
elif fmt == 'acpi':
line = fp.read().strip().split('\n')[-1]
- m = re.match('.* (?P<v>[0-9A-Fx]*) .*', line)
+ m = re.match(r'.* (?P<v>[0-9A-Fx]*) .*', line)
if m:
self.cfgdef[file] = m.group('v')
else:
@@ -1173,7 +1173,7 @@ class SystemValues:
fp = Popen([cmd, '-v'], stdout=PIPE, stderr=PIPE).stderr
out = ascii(fp.read()).strip()
fp.close()
- if re.match('turbostat version .*', out):
+ if re.match(r'turbostat version .*', out):
self.vprint(out)
return True
return False
@@ -1181,33 +1181,33 @@ class SystemValues:
cmd = self.getExec('turbostat')
rawout = keyline = valline = ''
fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
- fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE).stderr
- for line in fp:
+ fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE)
+ for line in fp.stderr:
line = ascii(line)
rawout += line
if keyline and valline:
continue
- if re.match('(?i)Avg_MHz.*', line):
+ if re.match(r'(?i)Avg_MHz.*', line):
keyline = line.strip().split()
elif keyline:
valline = line.strip().split()
- fp.close()
+ fp.wait()
if not keyline or not valline or len(keyline) != len(valline):
errmsg = 'unrecognized turbostat output:\n'+rawout.strip()
self.vprint(errmsg)
if not self.verbose:
pprint(errmsg)
- return ''
+ return (fp.returncode, '')
if self.verbose:
pprint(rawout.strip())
out = []
for key in keyline:
idx = keyline.index(key)
val = valline[idx]
- if key == 'SYS%LPI' and not s0ixready and re.match('^[0\.]*$', val):
+ if key == 'SYS%LPI' and not s0ixready and re.match(r'^[0\.]*$', val):
continue
out.append('%s=%s' % (key, val))
- return '|'.join(out)
+ return (fp.returncode, '|'.join(out))
def netfixon(self, net='both'):
cmd = self.getExec('netfix')
if not cmd:
@@ -1232,7 +1232,7 @@ class SystemValues:
except:
return ''
for line in reversed(w.split('\n')):
- m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
+ m = re.match(r' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
if not m or (dev and dev != m.group('dev')):
continue
return m.group('dev')
@@ -1261,14 +1261,14 @@ class SystemValues:
return
arr = msg.split()
for j in range(len(arr)):
- if re.match('^[0-9,\-\.]*$', arr[j]):
- arr[j] = '[0-9,\-\.]*'
+ if re.match(r'^[0-9,\-\.]*$', arr[j]):
+ arr[j] = r'[0-9,\-\.]*'
else:
arr[j] = arr[j]\
- .replace('\\', '\\\\').replace(']', '\]').replace('[', '\[')\
- .replace('.', '\.').replace('+', '\+').replace('*', '\*')\
- .replace('(', '\(').replace(')', '\)').replace('}', '\}')\
- .replace('{', '\{')
+ .replace('\\', r'\\\\').replace(']', r'\]').replace('[', r'\[')\
+ .replace('.', r'\.').replace('+', r'\+').replace('*', r'\*')\
+ .replace('(', r'\(').replace(')', r'\)').replace('}', r'\}')\
+ .replace('{', r'\{')
mstr = ' *'.join(arr)
entry = {
'line': msg,
@@ -1340,7 +1340,7 @@ class SystemValues:
fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout
ret = 'unknown'
for line in fp:
- m = re.match('[\s]*Monitor is (?P<m>.*)', ascii(line))
+ m = re.match(r'[\s]*Monitor is (?P<m>.*)', ascii(line))
if(m and len(m.group('m')) >= 2):
out = m.group('m').lower()
ret = out[3:] if out[0:2] == 'in' else out
@@ -1566,7 +1566,7 @@ class Data:
i += 1
if tp.stampInfo(line, sysvals):
continue
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if not m:
continue
t = float(m.group('ktime'))
@@ -1574,7 +1574,7 @@ class Data:
continue
dir = 'suspend' if t < self.tSuspended else 'resume'
msg = m.group('msg')
- if re.match('capability: warning: .*', msg):
+ if re.match(r'capability: warning: .*', msg):
continue
for err in self.errlist:
if re.match(self.errlist[err], msg):
@@ -1679,8 +1679,8 @@ class Data:
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
- mc = re.match('\(.*\) *(?P<args>.*)', cdata)
- mr = re.match('\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
+ mc = re.match(r'\(.*\) *(?P<args>.*)', cdata)
+ mr = re.match(r'\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
if mc and mr:
c = mr.group('caller').split('+')[0]
a = mc.group('args').strip()
@@ -1997,7 +1997,7 @@ class Data:
list = self.dmesg[phase]['list']
mydev = ''
for devname in sorted(list):
- if name == devname or re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname):
+ if name == devname or re.match(r'^%s\[(?P<num>[0-9]*)\]$' % name, devname):
mydev = devname
if mydev:
return list[mydev]
@@ -2099,7 +2099,7 @@ class Data:
for dev in sorted(list):
pdev = list[dev]['par']
pid = list[dev]['pid']
- if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
+ if(pid < 0 or re.match(r'[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
@@ -2190,26 +2190,26 @@ class Data:
if 'resume_complete' in dm:
dm['resume_complete']['end'] = time
def initcall_debug_call(self, line, quick=False):
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- 'PM: *calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'PM: *calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- 'calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
- '(?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
+ r'(?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)', line)
if m:
return True if quick else m.group('t', 'f', 'n', 'p')
return False if quick else ('', '', '', '')
def initcall_debug_return(self, line, quick=False):
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: PM: '+\
- '.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: PM: '+\
+ r'.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- '.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
- '(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
+ r'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', line)
if m:
return True if quick else m.group('t', 'f', 'dt')
return False if quick else ('', '', '')
@@ -2294,28 +2294,28 @@ class FTraceLine:
if not m and not d:
return
# is this a trace event
- if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
+ if(d == 'traceevent' or re.match(r'^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
- em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
+ em = re.match(r'^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
- emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
+ emm = re.match(r'^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
- km = re.match('^(?P<n>.*)_cal$', self.type)
+ km = re.match(r'^(?P<n>.*)_cal$', self.type)
if km:
self.fcall = True
self.fkprobe = True
self.type = km.group('n')
return
- km = re.match('^(?P<n>.*)_ret$', self.type)
+ km = re.match(r'^(?P<n>.*)_ret$', self.type)
if km:
self.freturn = True
self.fkprobe = True
@@ -2327,7 +2327,7 @@ class FTraceLine:
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
- match = re.match('^(?P<d> *)(?P<o>.*)$', m)
+ match = re.match(r'^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
@@ -2337,7 +2337,7 @@ class FTraceLine:
self.freturn = True
if(len(m) > 1):
# includes comment with function name
- match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
+ match = re.match(r'^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n').strip()
# function call
@@ -2345,13 +2345,13 @@ class FTraceLine:
self.fcall = True
# function call with children
if(m[-1] == '{'):
- match = re.match('^(?P<n>.*) *\(.*', m)
+ match = re.match(r'^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
- match = re.match('^(?P<n>.*) *\(.*', m)
+ match = re.match(r'^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# something else (possibly a trace marker)
@@ -2385,7 +2385,7 @@ class FTraceLine:
return False
else:
if(self.type == 'suspend_resume' and
- re.match('suspend_enter\[.*\] begin', self.name)):
+ re.match(r'suspend_enter\[.*\] begin', self.name)):
return True
return False
def endMarker(self):
@@ -2398,7 +2398,7 @@ class FTraceLine:
return False
else:
if(self.type == 'suspend_resume' and
- re.match('thaw_processes\[.*\] end', self.name)):
+ re.match(r'thaw_processes\[.*\] end', self.name)):
return True
return False
@@ -2976,30 +2976,30 @@ class Timeline:
# Description:
# A list of values describing the properties of these test runs
class TestProps:
- stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
- '(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
- ' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
- wififmt = '^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
- tstatfmt = '^# turbostat (?P<t>\S*)'
- testerrfmt = '^# enter_sleep_error (?P<e>.*)'
- sysinfofmt = '^# sysinfo .*'
- cmdlinefmt = '^# command \| (?P<cmd>.*)'
- kparamsfmt = '^# kparams \| (?P<kp>.*)'
- devpropfmt = '# Device Properties: .*'
- pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9,_]*): (?P<info>.*)'
- tracertypefmt = '# tracer: (?P<t>.*)'
- firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
- procexecfmt = 'ps - (?P<ps>.*)$'
- procmultifmt = '@(?P<n>[0-9]*)\|(?P<ps>.*)$'
+ stampfmt = r'# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
+ r'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
+ r' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
+ wififmt = r'^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
+ tstatfmt = r'^# turbostat (?P<t>\S*)'
+ testerrfmt = r'^# enter_sleep_error (?P<e>.*)'
+ sysinfofmt = r'^# sysinfo .*'
+ cmdlinefmt = r'^# command \| (?P<cmd>.*)'
+ kparamsfmt = r'^# kparams \| (?P<kp>.*)'
+ devpropfmt = r'# Device Properties: .*'
+ pinfofmt = r'# platform-(?P<val>[a-z,A-Z,0-9,_]*): (?P<info>.*)'
+ tracertypefmt = r'# tracer: (?P<t>.*)'
+ firmwarefmt = r'# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
+ procexecfmt = r'ps - (?P<ps>.*)$'
+ procmultifmt = r'@(?P<n>[0-9]*)\|(?P<ps>.*)$'
ftrace_line_fmt_fg = \
- '^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
- ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
- '[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
+ r'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
+ r' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
+ r'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
- ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
- '(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
- '(?P<msg>.*)'
- machinesuspend = 'machine_suspend\[.*'
+ r' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
+ r'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
+ r'(?P<msg>.*)'
+ machinesuspend = r'machine_suspend\[.*'
multiproclist = dict()
multiproctime = 0.0
multiproccnt = 0
@@ -3081,14 +3081,14 @@ class TestProps:
sv.hostname = data.stamp['host']
sv.suspendmode = data.stamp['mode']
if sv.suspendmode == 'freeze':
- self.machinesuspend = 'timekeeping_freeze\[.*'
+ self.machinesuspend = r'timekeeping_freeze\[.*'
else:
- self.machinesuspend = 'machine_suspend\[.*'
+ self.machinesuspend = r'machine_suspend\[.*'
if sv.suspendmode == 'command' and sv.ftracefile != '':
modes = ['on', 'freeze', 'standby', 'mem', 'disk']
fp = sv.openlog(sv.ftracefile, 'r')
for line in fp:
- m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line)
+ m = re.match(r'.* machine_suspend\[(?P<mode>.*)\]', line)
if m and m.group('mode') in ['1', '2', '3', '4']:
sv.suspendmode = modes[int(m.group('mode'))]
data.stamp['mode'] = sv.suspendmode
@@ -3401,9 +3401,9 @@ def loadTraceLog():
for i in range(len(blk)):
if 'SUSPEND START' in blk[i][3]:
first.append(i)
- elif re.match('.* timekeeping_freeze.*begin', blk[i][3]):
+ elif re.match(r'.* timekeeping_freeze.*begin', blk[i][3]):
last.append(i)
- elif re.match('.* timekeeping_freeze.*end', blk[i][3]):
+ elif re.match(r'.* timekeeping_freeze.*end', blk[i][3]):
first.append(i)
elif 'RESUME COMPLETE' in blk[i][3]:
last.append(i)
@@ -3514,28 +3514,28 @@ def parseTraceLog(live=False):
if(t.fevent):
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
- if(re.match('(?P<name>.*) begin$', t.name)):
+ if(re.match(r'(?P<name>.*) begin$', t.name)):
isbegin = True
- elif(re.match('(?P<name>.*) end$', t.name)):
+ elif(re.match(r'(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
if '[' in t.name:
- m = re.match('(?P<name>.*)\[.*', t.name)
+ m = re.match(r'(?P<name>.*)\[.*', t.name)
else:
- m = re.match('(?P<name>.*) .*', t.name)
+ m = re.match(r'(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(name.split('[')[0] in tracewatch):
continue
# -- phase changes --
# start of kernel suspend
- if(re.match('suspend_enter\[.*', t.name)):
+ if(re.match(r'suspend_enter\[.*', t.name)):
if(isbegin and data.tKernSus == 0):
data.tKernSus = t.time
continue
# suspend_prepare start
- elif(re.match('dpm_prepare\[.*', t.name)):
+ elif(re.match(r'dpm_prepare\[.*', t.name)):
if isbegin and data.first_suspend_prepare:
data.first_suspend_prepare = False
if data.tKernSus == 0:
@@ -3544,15 +3544,15 @@ def parseTraceLog(live=False):
phase = data.setPhase('suspend_prepare', t.time, isbegin)
continue
# suspend start
- elif(re.match('dpm_suspend\[.*', t.name)):
+ elif(re.match(r'dpm_suspend\[.*', t.name)):
phase = data.setPhase('suspend', t.time, isbegin)
continue
# suspend_late start
- elif(re.match('dpm_suspend_late\[.*', t.name)):
+ elif(re.match(r'dpm_suspend_late\[.*', t.name)):
phase = data.setPhase('suspend_late', t.time, isbegin)
continue
# suspend_noirq start
- elif(re.match('dpm_suspend_noirq\[.*', t.name)):
+ elif(re.match(r'dpm_suspend_noirq\[.*', t.name)):
phase = data.setPhase('suspend_noirq', t.time, isbegin)
continue
# suspend_machine/resume_machine
@@ -3589,19 +3589,19 @@ def parseTraceLog(live=False):
data.tResumed = t.time
continue
# resume_noirq start
- elif(re.match('dpm_resume_noirq\[.*', t.name)):
+ elif(re.match(r'dpm_resume_noirq\[.*', t.name)):
phase = data.setPhase('resume_noirq', t.time, isbegin)
continue
# resume_early start
- elif(re.match('dpm_resume_early\[.*', t.name)):
+ elif(re.match(r'dpm_resume_early\[.*', t.name)):
phase = data.setPhase('resume_early', t.time, isbegin)
continue
# resume start
- elif(re.match('dpm_resume\[.*', t.name)):
+ elif(re.match(r'dpm_resume\[.*', t.name)):
phase = data.setPhase('resume', t.time, isbegin)
continue
# resume complete start
- elif(re.match('dpm_complete\[.*', t.name)):
+ elif(re.match(r'dpm_complete\[.*', t.name)):
phase = data.setPhase('resume_complete', t.time, isbegin)
continue
# skip trace events inside devices calls
@@ -3635,7 +3635,7 @@ def parseTraceLog(live=False):
elif(t.type == 'device_pm_callback_start'):
if phase not in data.dmesg:
continue
- m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
+ m = re.match(r'(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
@@ -3650,7 +3650,7 @@ def parseTraceLog(live=False):
elif(t.type == 'device_pm_callback_end'):
if phase not in data.dmesg:
continue
- m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
+ m = re.match(r'(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
@@ -3904,24 +3904,24 @@ def loadKernelLog():
line = line[idx:]
if tp.stampInfo(line, sysvals):
continue
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
msg = m.group("msg")
- if re.match('PM: Syncing filesystems.*', msg) or \
- re.match('PM: suspend entry.*', msg):
+ if re.match(r'PM: Syncing filesystems.*', msg) or \
+ re.match(r'PM: suspend entry.*', msg):
if(data):
testruns.append(data)
data = Data(len(testruns))
tp.parseStamp(data, sysvals)
if(not data):
continue
- m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
+ m = re.match(r'.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
if(m):
sysvals.stamp['kernel'] = m.group('k')
- m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
+ m = re.match(r'PM: Preparing system for (?P<m>.*) sleep', msg)
if not m:
- m = re.match('PM: Preparing system for sleep \((?P<m>.*)\)', msg)
+ m = re.match(r'PM: Preparing system for sleep \((?P<m>.*)\)', msg)
if m:
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
@@ -3984,7 +3984,7 @@ def parseKernelLog(data):
'resume_machine': ['[PM: ]*Timekeeping suspended for.*',
'ACPI: Low-level resume complete.*',
'ACPI: resume from mwait',
- 'Suspended for [0-9\.]* seconds'],
+ r'Suspended for [0-9\.]* seconds'],
'resume_noirq': ['PM: resume from suspend-to-idle',
'ACPI: Waking up from system sleep state.*'],
'resume_early': ['PM: noirq resume of devices complete after.*',
@@ -3993,7 +3993,7 @@ def parseKernelLog(data):
'PM: early restore of devices complete after.*'],
'resume_complete': ['PM: resume of devices complete after.*',
'PM: restore of devices complete after.*'],
- 'post_resume': ['.*Restarting tasks \.\.\..*'],
+ 'post_resume': [r'.*Restarting tasks \.\.\..*'],
}
# action table (expected events that occur and show up in dmesg)
@@ -4021,7 +4021,7 @@ def parseKernelLog(data):
actions = dict()
for line in data.dmesgtext:
# parse each dmesg line into the time and message
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
@@ -4145,26 +4145,26 @@ def parseKernelLog(data):
if(a in actions and actions[a][-1]['begin'] == actions[a][-1]['end']):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
- if(re.match('Disabling non-boot CPUs .*', msg)):
+ if(re.match(r'Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
- elif(re.match('Enabling non-boot CPUs .*', msg)):
+ elif(re.match(r'Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
- elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
- or re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
+ elif(re.match(r'smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
+ or re.match(r'psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
# end of a cpu suspend, start of the next
- m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
+ m = re.match(r'smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
if(not m):
- m = re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)
+ m = re.match(r'psci: CPU(?P<cpu>[0-9]*) killed.*', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
- elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
+ elif(re.match(r'CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
- m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
+ m = re.match(r'CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
@@ -4343,7 +4343,8 @@ def createHTMLSummarySimple(testruns, htmlfile, title):
list[mode]['data'].append([data['host'], data['kernel'],
data['time'], tVal[0], tVal[1], data['url'], res,
data['issues'], data['sus_worst'], data['sus_worsttime'],
- data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi])
+ data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi,
+ (data['fullmode'] if 'fullmode' in data else mode)])
idx = len(list[mode]['data']) - 1
if res.startswith('fail in'):
res = 'fail'
@@ -4449,7 +4450,7 @@ def createHTMLSummarySimple(testruns, htmlfile, title):
elif idx == iMed[i]:
tHigh[i] = ' id="%smed" class=medval title="Median"' % tag
html += td.format("%d" % (list[mode]['data'].index(d) + 1)) # row
- html += td.format(mode) # mode
+ html += td.format(d[15]) # mode
html += td.format(d[0]) # host
html += td.format(d[1]) # kernel
html += td.format(d[2]) # time
@@ -5061,6 +5062,7 @@ def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
def addScriptCode(hf, testruns):
t0 = testruns[0].start * 1000
tMax = testruns[-1].end * 1000
+ hf.write('<script type="text/javascript">\n');
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
@@ -5068,384 +5070,383 @@ def addScriptCode(hf, testruns):
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
- script_code = \
- '<script type="text/javascript">\n'+detail+\
- ' var resolution = -1;\n'\
- ' var dragval = [0, 0];\n'\
- ' function redrawTimescale(t0, tMax, tS) {\n'\
- ' var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
- ' var tTotal = tMax - t0;\n'\
- ' var list = document.getElementsByClassName("tblock");\n'\
- ' for (var i = 0; i < list.length; i++) {\n'\
- ' var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
- ' var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
- ' var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
- ' var mMax = m0 + mTotal;\n'\
- ' var html = "";\n'\
- ' var divTotal = Math.floor(mTotal/tS) + 1;\n'\
- ' if(divTotal > 1000) continue;\n'\
- ' var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
- ' var pos = 0.0, val = 0.0;\n'\
- ' for (var j = 0; j < divTotal; j++) {\n'\
- ' var htmlline = "";\n'\
- ' var mode = list[i].id[5];\n'\
- ' if(mode == "s") {\n'\
- ' pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
- ' val = (j-divTotal+1)*tS;\n'\
- ' if(j == divTotal - 1)\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S&rarr;</cS></div>\';\n'\
- ' else\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
- ' } else {\n'\
- ' pos = 100 - (((j)*tS*100)/mTotal);\n'\
- ' val = (j)*tS;\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
- ' if(j == 0)\n'\
- ' if(mode == "r")\n'\
- ' htmlline = rline+"<cS>&larr;R</cS></div>";\n'\
- ' else\n'\
- ' htmlline = rline+"<cS>0ms</div>";\n'\
- ' }\n'\
- ' html += htmlline;\n'\
- ' }\n'\
- ' timescale.innerHTML = html;\n'\
- ' }\n'\
- ' }\n'\
- ' function zoomTimeline() {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var zoombox = document.getElementById("dmesgzoombox");\n'\
- ' var left = zoombox.scrollLeft;\n'\
- ' var val = parseFloat(dmesg.style.width);\n'\
- ' var newval = 100;\n'\
- ' var sh = window.outerWidth / 2;\n'\
- ' if(this.id == "zoomin") {\n'\
- ' newval = val * 1.2;\n'\
- ' if(newval > 910034) newval = 910034;\n'\
- ' dmesg.style.width = newval+"%";\n'\
- ' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
- ' } else if (this.id == "zoomout") {\n'\
- ' newval = val / 1.2;\n'\
- ' if(newval < 100) newval = 100;\n'\
- ' dmesg.style.width = newval+"%";\n'\
- ' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
- ' } else {\n'\
- ' zoombox.scrollLeft = 0;\n'\
- ' dmesg.style.width = "100%";\n'\
- ' }\n'\
- ' var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
- ' var t0 = bounds[0];\n'\
- ' var tMax = bounds[1];\n'\
- ' var tTotal = tMax - t0;\n'\
- ' var wTotal = tTotal * 100.0 / newval;\n'\
- ' var idx = 7*window.innerWidth/1100;\n'\
- ' for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
- ' if(i >= tS.length) i = tS.length - 1;\n'\
- ' if(tS[i] == resolution) return;\n'\
- ' resolution = tS[i];\n'\
- ' redrawTimescale(t0, tMax, tS[i]);\n'\
- ' }\n'\
- ' function deviceName(title) {\n'\
- ' var name = title.slice(0, title.indexOf(" ("));\n'\
- ' return name;\n'\
- ' }\n'\
- ' function deviceHover() {\n'\
- ' var name = deviceName(this.title);\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' var cpu = -1;\n'\
- ' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(7));\n'\
- ' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(8));\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dname = deviceName(dev[i].title);\n'\
- ' var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
- ' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
- ' (name == dname))\n'\
- ' {\n'\
- ' dev[i].className = "hover "+cname;\n'\
- ' } else {\n'\
- ' dev[i].className = cname;\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function deviceUnhover() {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
- ' }\n'\
- ' }\n'\
- ' function deviceTitle(title, total, cpu) {\n'\
- ' var prefix = "Total";\n'\
- ' if(total.length > 3) {\n'\
- ' prefix = "Average";\n'\
- ' total[1] = (total[1]+total[3])/2;\n'\
- ' total[2] = (total[2]+total[4])/2;\n'\
- ' }\n'\
- ' var devtitle = document.getElementById("devicedetailtitle");\n'\
- ' var name = deviceName(title);\n'\
- ' if(cpu >= 0) name = "CPU"+cpu;\n'\
- ' var driver = "";\n'\
- ' var tS = "<t2>(</t2>";\n'\
- ' var tR = "<t2>)</t2>";\n'\
- ' if(total[1] > 0)\n'\
- ' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
- ' if(total[2] > 0)\n'\
- ' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
- ' var s = title.indexOf("{");\n'\
- ' var e = title.indexOf("}");\n'\
- ' if((s >= 0) && (e >= 0))\n'\
- ' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
- ' if(total[1] > 0 && total[2] > 0)\n'\
- ' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
- ' else\n'\
- ' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
- ' return name;\n'\
- ' }\n'\
- ' function deviceDetail() {\n'\
- ' var devinfo = document.getElementById("devicedetail");\n'\
- ' devinfo.style.display = "block";\n'\
- ' var name = deviceName(this.title);\n'\
- ' var cpu = -1;\n'\
- ' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(7));\n'\
- ' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(8));\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' var idlist = [];\n'\
- ' var pdata = [[]];\n'\
- ' if(document.getElementById("devicedetail1"))\n'\
- ' pdata = [[], []];\n'\
- ' var pd = pdata[0];\n'\
- ' var total = [0.0, 0.0, 0.0];\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dname = deviceName(dev[i].title);\n'\
- ' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
- ' (name == dname))\n'\
- ' {\n'\
- ' idlist[idlist.length] = dev[i].id;\n'\
- ' var tidx = 1;\n'\
- ' if(dev[i].id[0] == "a") {\n'\
- ' pd = pdata[0];\n'\
- ' } else {\n'\
- ' if(pdata.length == 1) pdata[1] = [];\n'\
- ' if(total.length == 3) total[3]=total[4]=0.0;\n'\
- ' pd = pdata[1];\n'\
- ' tidx = 3;\n'\
- ' }\n'\
- ' var info = dev[i].title.split(" ");\n'\
- ' var pname = info[info.length-1];\n'\
- ' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
- ' total[0] += pd[pname];\n'\
- ' if(pname.indexOf("suspend") >= 0)\n'\
- ' total[tidx] += pd[pname];\n'\
- ' else\n'\
- ' total[tidx+1] += pd[pname];\n'\
- ' }\n'\
- ' }\n'\
- ' var devname = deviceTitle(this.title, total, cpu);\n'\
- ' var left = 0.0;\n'\
- ' for (var t = 0; t < pdata.length; t++) {\n'\
- ' pd = pdata[t];\n'\
- ' devinfo = document.getElementById("devicedetail"+t);\n'\
- ' var phases = devinfo.getElementsByClassName("phaselet");\n'\
- ' for (var i = 0; i < phases.length; i++) {\n'\
- ' if(phases[i].id in pd) {\n'\
- ' var w = 100.0*pd[phases[i].id]/total[0];\n'\
- ' var fs = 32;\n'\
- ' if(w < 8) fs = 4*w | 0;\n'\
- ' var fs2 = fs*3/4;\n'\
- ' phases[i].style.width = w+"%";\n'\
- ' phases[i].style.left = left+"%";\n'\
- ' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
- ' left += w;\n'\
- ' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
- ' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
- ' phases[i].innerHTML = time+pname;\n'\
- ' } else {\n'\
- ' phases[i].style.width = "0%";\n'\
- ' phases[i].style.left = left+"%";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' if(typeof devstats !== \'undefined\')\n'\
- ' callDetail(this.id, this.title);\n'\
- ' var cglist = document.getElementById("callgraphs");\n'\
- ' if(!cglist) return;\n'\
- ' var cg = cglist.getElementsByClassName("atop");\n'\
- ' if(cg.length < 10) return;\n'\
- ' for (var i = 0; i < cg.length; i++) {\n'\
- ' cgid = cg[i].id.split("x")[0]\n'\
- ' if(idlist.indexOf(cgid) >= 0) {\n'\
- ' cg[i].style.display = "block";\n'\
- ' } else {\n'\
- ' cg[i].style.display = "none";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function callDetail(devid, devtitle) {\n'\
- ' if(!(devid in devstats) || devstats[devid].length < 1)\n'\
- ' return;\n'\
- ' var list = devstats[devid];\n'\
- ' var tmp = devtitle.split(" ");\n'\
- ' var name = tmp[0], phase = tmp[tmp.length-1];\n'\
- ' var dd = document.getElementById(phase);\n'\
- ' var total = parseFloat(tmp[1].slice(1));\n'\
- ' var mlist = [];\n'\
- ' var maxlen = 0;\n'\
- ' var info = []\n'\
- ' for(var i in list) {\n'\
- ' if(list[i][0] == "@") {\n'\
- ' info = list[i].split("|");\n'\
- ' continue;\n'\
- ' }\n'\
- ' var tmp = list[i].split("|");\n'\
- ' var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
- ' var p = (t*100.0/total).toFixed(2);\n'\
- ' mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
- ' if(f.length > maxlen)\n'\
- ' maxlen = f.length;\n'\
- ' }\n'\
- ' var pad = 5;\n'\
- ' if(mlist.length == 0) pad = 30;\n'\
- ' var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
- ' if(info.length > 2)\n'\
- ' html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
- ' if(info.length > 3)\n'\
- ' html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
- ' if(info.length > 4)\n'\
- ' html += ", return=<b>"+info[4]+"</b>";\n'\
- ' html += "</t3></div>";\n'\
- ' if(mlist.length > 0) {\n'\
- ' html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
- ' html += "</tr><tr><th>Calls</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][1]+"</td>";\n'\
- ' html += "</tr><tr><th>Time(ms)</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][2]+"</td>";\n'\
- ' html += "</tr><tr><th>Percent</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][3]+"</td>";\n'\
- ' html += "</tr></table>";\n'\
- ' }\n'\
- ' dd.innerHTML = html;\n'\
- ' var height = (maxlen*5)+100;\n'\
- ' dd.style.height = height+"px";\n'\
- ' document.getElementById("devicedetail").style.height = height+"px";\n'\
- ' }\n'\
- ' function callSelect() {\n'\
- ' var cglist = document.getElementById("callgraphs");\n'\
- ' if(!cglist) return;\n'\
- ' var cg = cglist.getElementsByClassName("atop");\n'\
- ' for (var i = 0; i < cg.length; i++) {\n'\
- ' if(this.id == cg[i].id) {\n'\
- ' cg[i].style.display = "block";\n'\
- ' } else {\n'\
- ' cg[i].style.display = "none";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function devListWindow(e) {\n'\
- ' var win = window.open();\n'\
- ' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
- ' "<style type=\\"text/css\\">"+\n'\
- ' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
- ' "</style>"\n'\
- ' var dt = devtable[0];\n'\
- ' if(e.target.id != "devlist1")\n'\
- ' dt = devtable[1];\n'\
- ' win.document.write(html+dt);\n'\
- ' }\n'\
- ' function errWindow() {\n'\
- ' var range = this.id.split("_");\n'\
- ' var idx1 = parseInt(range[0]);\n'\
- ' var idx2 = parseInt(range[1]);\n'\
- ' var win = window.open();\n'\
- ' var log = document.getElementById("dmesglog");\n'\
- ' var title = "<title>dmesg log</title>";\n'\
- ' var text = log.innerHTML.split("\\n");\n'\
- ' var html = "";\n'\
- ' for(var i = 0; i < text.length; i++) {\n'\
- ' if(i == idx1) {\n'\
- ' html += "<e id=target>"+text[i]+"</e>\\n";\n'\
- ' } else if(i > idx1 && i <= idx2) {\n'\
- ' html += "<e>"+text[i]+"</e>\\n";\n'\
- ' } else {\n'\
- ' html += text[i]+"\\n";\n'\
- ' }\n'\
- ' }\n'\
- ' win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");\n'\
- ' win.location.hash = "#target";\n'\
- ' win.document.close();\n'\
- ' }\n'\
- ' function logWindow(e) {\n'\
- ' var name = e.target.id.slice(4);\n'\
- ' var win = window.open();\n'\
- ' var log = document.getElementById(name+"log");\n'\
- ' var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
- ' win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
- ' win.document.close();\n'\
- ' }\n'\
- ' function onMouseDown(e) {\n'\
- ' dragval[0] = e.clientX;\n'\
- ' dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
- ' document.onmousemove = onMouseMove;\n'\
- ' }\n'\
- ' function onMouseMove(e) {\n'\
- ' var zoombox = document.getElementById("dmesgzoombox");\n'\
- ' zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
- ' }\n'\
- ' function onMouseUp(e) {\n'\
- ' document.onmousemove = null;\n'\
- ' }\n'\
- ' function onKeyPress(e) {\n'\
- ' var c = e.charCode;\n'\
- ' if(c != 42 && c != 43 && c != 45) return;\n'\
- ' var click = document.createEvent("Events");\n'\
- ' click.initEvent("click", true, false);\n'\
- ' if(c == 43) \n'\
- ' document.getElementById("zoomin").dispatchEvent(click);\n'\
- ' else if(c == 45)\n'\
- ' document.getElementById("zoomout").dispatchEvent(click);\n'\
- ' else if(c == 42)\n'\
- ' document.getElementById("zoomdef").dispatchEvent(click);\n'\
- ' }\n'\
- ' window.addEventListener("resize", function () {zoomTimeline();});\n'\
- ' window.addEventListener("load", function () {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' dmesg.style.width = "100%"\n'\
- ' dmesg.onmousedown = onMouseDown;\n'\
- ' document.onmouseup = onMouseUp;\n'\
- ' document.onkeypress = onKeyPress;\n'\
- ' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
- ' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
- ' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
- ' var list = document.getElementsByClassName("err");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = errWindow;\n'\
- ' var list = document.getElementsByClassName("logbtn");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = logWindow;\n'\
- ' list = document.getElementsByClassName("devlist");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = devListWindow;\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dev[i].onclick = deviceDetail;\n'\
- ' dev[i].onmouseover = deviceHover;\n'\
- ' dev[i].onmouseout = deviceUnhover;\n'\
- ' }\n'\
- ' var dev = dmesg.getElementsByClassName("srccall");\n'\
- ' for (var i = 0; i < dev.length; i++)\n'\
- ' dev[i].onclick = callSelect;\n'\
- ' zoomTimeline();\n'\
- ' });\n'\
- '</script>\n'
+ hf.write(detail);
+ script_code = r""" var resolution = -1;
+ var dragval = [0, 0];
+ function redrawTimescale(t0, tMax, tS) {
+ var rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">';
+ var tTotal = tMax - t0;
+ var list = document.getElementsByClassName("tblock");
+ for (var i = 0; i < list.length; i++) {
+ var timescale = list[i].getElementsByClassName("timescale")[0];
+ var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);
+ var mTotal = tTotal*parseFloat(list[i].style.width)/100;
+ var mMax = m0 + mTotal;
+ var html = "";
+ var divTotal = Math.floor(mTotal/tS) + 1;
+ if(divTotal > 1000) continue;
+ var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;
+ var pos = 0.0, val = 0.0;
+ for (var j = 0; j < divTotal; j++) {
+ var htmlline = "";
+ var mode = list[i].id[5];
+ if(mode == "s") {
+ pos = 100 - (((j)*tS*100)/mTotal) - divEdge;
+ val = (j-divTotal+1)*tS;
+ if(j == divTotal - 1)
+ htmlline = '<div class="t" style="right:'+pos+'%"><cS>S&rarr;</cS></div>';
+ else
+ htmlline = '<div class="t" style="right:'+pos+'%">'+val+'ms</div>';
+ } else {
+ pos = 100 - (((j)*tS*100)/mTotal);
+ val = (j)*tS;
+ htmlline = '<div class="t" style="right:'+pos+'%">'+val+'ms</div>';
+ if(j == 0)
+ if(mode == "r")
+ htmlline = rline+"<cS>&larr;R</cS></div>";
+ else
+ htmlline = rline+"<cS>0ms</div>";
+ }
+ html += htmlline;
+ }
+ timescale.innerHTML = html;
+ }
+ }
+ function zoomTimeline() {
+ var dmesg = document.getElementById("dmesg");
+ var zoombox = document.getElementById("dmesgzoombox");
+ var left = zoombox.scrollLeft;
+ var val = parseFloat(dmesg.style.width);
+ var newval = 100;
+ var sh = window.outerWidth / 2;
+ if(this.id == "zoomin") {
+ newval = val * 1.2;
+ if(newval > 910034) newval = 910034;
+ dmesg.style.width = newval+"%";
+ zoombox.scrollLeft = ((left + sh) * newval / val) - sh;
+ } else if (this.id == "zoomout") {
+ newval = val / 1.2;
+ if(newval < 100) newval = 100;
+ dmesg.style.width = newval+"%";
+ zoombox.scrollLeft = ((left + sh) * newval / val) - sh;
+ } else {
+ zoombox.scrollLeft = 0;
+ dmesg.style.width = "100%";
+ }
+ var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];
+ var t0 = bounds[0];
+ var tMax = bounds[1];
+ var tTotal = tMax - t0;
+ var wTotal = tTotal * 100.0 / newval;
+ var idx = 7*window.innerWidth/1100;
+ for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);
+ if(i >= tS.length) i = tS.length - 1;
+ if(tS[i] == resolution) return;
+ resolution = tS[i];
+ redrawTimescale(t0, tMax, tS[i]);
+ }
+ function deviceName(title) {
+ var name = title.slice(0, title.indexOf(" ("));
+ return name;
+ }
+ function deviceHover() {
+ var name = deviceName(this.title);
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ var cpu = -1;
+ if(name.match("CPU_ON\[[0-9]*\]"))
+ cpu = parseInt(name.slice(7));
+ else if(name.match("CPU_OFF\[[0-9]*\]"))
+ cpu = parseInt(name.slice(8));
+ for (var i = 0; i < dev.length; i++) {
+ dname = deviceName(dev[i].title);
+ var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));
+ if((cpu >= 0 && dname.match("CPU_O[NF]*\\[*"+cpu+"\\]")) ||
+ (name == dname))
+ {
+ dev[i].className = "hover "+cname;
+ } else {
+ dev[i].className = cname;
+ }
+ }
+ }
+ function deviceUnhover() {
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ for (var i = 0; i < dev.length; i++) {
+ dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));
+ }
+ }
+ function deviceTitle(title, total, cpu) {
+ var prefix = "Total";
+ if(total.length > 3) {
+ prefix = "Average";
+ total[1] = (total[1]+total[3])/2;
+ total[2] = (total[2]+total[4])/2;
+ }
+ var devtitle = document.getElementById("devicedetailtitle");
+ var name = deviceName(title);
+ if(cpu >= 0) name = "CPU"+cpu;
+ var driver = "";
+ var tS = "<t2>(</t2>";
+ var tR = "<t2>)</t2>";
+ if(total[1] > 0)
+ tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";
+ if(total[2] > 0)
+ tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";
+ var s = title.indexOf("{");
+ var e = title.indexOf("}");
+ if((s >= 0) && (e >= 0))
+ driver = title.slice(s+1, e) + " <t1>@</t1> ";
+ if(total[1] > 0 && total[2] > 0)
+ devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;
+ else
+ devtitle.innerHTML = "<t0>"+title+"</t0>";
+ return name;
+ }
+ function deviceDetail() {
+ var devinfo = document.getElementById("devicedetail");
+ devinfo.style.display = "block";
+ var name = deviceName(this.title);
+ var cpu = -1;
+ if(name.match("CPU_ON\[[0-9]*\]"))
+ cpu = parseInt(name.slice(7));
+ else if(name.match("CPU_OFF\[[0-9]*\]"))
+ cpu = parseInt(name.slice(8));
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ var idlist = [];
+ var pdata = [[]];
+ if(document.getElementById("devicedetail1"))
+ pdata = [[], []];
+ var pd = pdata[0];
+ var total = [0.0, 0.0, 0.0];
+ for (var i = 0; i < dev.length; i++) {
+ dname = deviceName(dev[i].title);
+ if((cpu >= 0 && dname.match("CPU_O[NF]*\\[*"+cpu+"\\]")) ||
+ (name == dname))
+ {
+ idlist[idlist.length] = dev[i].id;
+ var tidx = 1;
+ if(dev[i].id[0] == "a") {
+ pd = pdata[0];
+ } else {
+ if(pdata.length == 1) pdata[1] = [];
+ if(total.length == 3) total[3]=total[4]=0.0;
+ pd = pdata[1];
+ tidx = 3;
+ }
+ var info = dev[i].title.split(" ");
+ var pname = info[info.length-1];
+ pd[pname] = parseFloat(info[info.length-3].slice(1));
+ total[0] += pd[pname];
+ if(pname.indexOf("suspend") >= 0)
+ total[tidx] += pd[pname];
+ else
+ total[tidx+1] += pd[pname];
+ }
+ }
+ var devname = deviceTitle(this.title, total, cpu);
+ var left = 0.0;
+ for (var t = 0; t < pdata.length; t++) {
+ pd = pdata[t];
+ devinfo = document.getElementById("devicedetail"+t);
+ var phases = devinfo.getElementsByClassName("phaselet");
+ for (var i = 0; i < phases.length; i++) {
+ if(phases[i].id in pd) {
+ var w = 100.0*pd[phases[i].id]/total[0];
+ var fs = 32;
+ if(w < 8) fs = 4*w | 0;
+ var fs2 = fs*3/4;
+ phases[i].style.width = w+"%";
+ phases[i].style.left = left+"%";
+ phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";
+ left += w;
+ var time = "<t4 style=\"font-size:"+fs+"px\">"+pd[phases[i].id]+" ms<br></t4>";
+ var pname = "<t3 style=\"font-size:"+fs2+"px\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";
+ phases[i].innerHTML = time+pname;
+ } else {
+ phases[i].style.width = "0%";
+ phases[i].style.left = left+"%";
+ }
+ }
+ }
+ if(typeof devstats !== 'undefined')
+ callDetail(this.id, this.title);
+ var cglist = document.getElementById("callgraphs");
+ if(!cglist) return;
+ var cg = cglist.getElementsByClassName("atop");
+ if(cg.length < 10) return;
+ for (var i = 0; i < cg.length; i++) {
+ cgid = cg[i].id.split("x")[0]
+ if(idlist.indexOf(cgid) >= 0) {
+ cg[i].style.display = "block";
+ } else {
+ cg[i].style.display = "none";
+ }
+ }
+ }
+ function callDetail(devid, devtitle) {
+ if(!(devid in devstats) || devstats[devid].length < 1)
+ return;
+ var list = devstats[devid];
+ var tmp = devtitle.split(" ");
+ var name = tmp[0], phase = tmp[tmp.length-1];
+ var dd = document.getElementById(phase);
+ var total = parseFloat(tmp[1].slice(1));
+ var mlist = [];
+ var maxlen = 0;
+ var info = []
+ for(var i in list) {
+ if(list[i][0] == "@") {
+ info = list[i].split("|");
+ continue;
+ }
+ var tmp = list[i].split("|");
+ var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);
+ var p = (t*100.0/total).toFixed(2);
+ mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];
+ if(f.length > maxlen)
+ maxlen = f.length;
+ }
+ var pad = 5;
+ if(mlist.length == 0) pad = 30;
+ var html = '<div style="padding-top:'+pad+'px"><t3> <b>'+name+':</b>';
+ if(info.length > 2)
+ html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";
+ if(info.length > 3)
+ html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";
+ if(info.length > 4)
+ html += ", return=<b>"+info[4]+"</b>";
+ html += "</t3></div>";
+ if(mlist.length > 0) {
+ html += '<table class=fstat style="padding-top:'+(maxlen*5)+'px;"><tr><th>Function</th>';
+ for(var i in mlist)
+ html += "<td class=vt>"+mlist[i][0]+"</td>";
+ html += "</tr><tr><th>Calls</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][1]+"</td>";
+ html += "</tr><tr><th>Time(ms)</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][2]+"</td>";
+ html += "</tr><tr><th>Percent</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][3]+"</td>";
+ html += "</tr></table>";
+ }
+ dd.innerHTML = html;
+ var height = (maxlen*5)+100;
+ dd.style.height = height+"px";
+ document.getElementById("devicedetail").style.height = height+"px";
+ }
+ function callSelect() {
+ var cglist = document.getElementById("callgraphs");
+ if(!cglist) return;
+ var cg = cglist.getElementsByClassName("atop");
+ for (var i = 0; i < cg.length; i++) {
+ if(this.id == cg[i].id) {
+ cg[i].style.display = "block";
+ } else {
+ cg[i].style.display = "none";
+ }
+ }
+ }
+ function devListWindow(e) {
+ var win = window.open();
+ var html = "<title>"+e.target.innerHTML+"</title>"+
+ "<style type=\"text/css\">"+
+ " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+
+ "</style>"
+ var dt = devtable[0];
+ if(e.target.id != "devlist1")
+ dt = devtable[1];
+ win.document.write(html+dt);
+ }
+ function errWindow() {
+ var range = this.id.split("_");
+ var idx1 = parseInt(range[0]);
+ var idx2 = parseInt(range[1]);
+ var win = window.open();
+ var log = document.getElementById("dmesglog");
+ var title = "<title>dmesg log</title>";
+ var text = log.innerHTML.split("\n");
+ var html = "";
+ for(var i = 0; i < text.length; i++) {
+ if(i == idx1) {
+ html += "<e id=target>"+text[i]+"</e>\n";
+ } else if(i > idx1 && i <= idx2) {
+ html += "<e>"+text[i]+"</e>\n";
+ } else {
+ html += text[i]+"\n";
+ }
+ }
+ win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");
+ win.location.hash = "#target";
+ win.document.close();
+ }
+ function logWindow(e) {
+ var name = e.target.id.slice(4);
+ var win = window.open();
+ var log = document.getElementById(name+"log");
+ var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";
+ win.document.write(title+"<pre>"+log.innerHTML+"</pre>");
+ win.document.close();
+ }
+ function onMouseDown(e) {
+ dragval[0] = e.clientX;
+ dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;
+ document.onmousemove = onMouseMove;
+ }
+ function onMouseMove(e) {
+ var zoombox = document.getElementById("dmesgzoombox");
+ zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;
+ }
+ function onMouseUp(e) {
+ document.onmousemove = null;
+ }
+ function onKeyPress(e) {
+ var c = e.charCode;
+ if(c != 42 && c != 43 && c != 45) return;
+ var click = document.createEvent("Events");
+ click.initEvent("click", true, false);
+ if(c == 43)
+ document.getElementById("zoomin").dispatchEvent(click);
+ else if(c == 45)
+ document.getElementById("zoomout").dispatchEvent(click);
+ else if(c == 42)
+ document.getElementById("zoomdef").dispatchEvent(click);
+ }
+ window.addEventListener("resize", function () {zoomTimeline();});
+ window.addEventListener("load", function () {
+ var dmesg = document.getElementById("dmesg");
+ dmesg.style.width = "100%"
+ dmesg.onmousedown = onMouseDown;
+ document.onmouseup = onMouseUp;
+ document.onkeypress = onKeyPress;
+ document.getElementById("zoomin").onclick = zoomTimeline;
+ document.getElementById("zoomout").onclick = zoomTimeline;
+ document.getElementById("zoomdef").onclick = zoomTimeline;
+ var list = document.getElementsByClassName("err");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = errWindow;
+ var list = document.getElementsByClassName("logbtn");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = logWindow;
+ list = document.getElementsByClassName("devlist");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = devListWindow;
+ var dev = dmesg.getElementsByClassName("thread");
+ for (var i = 0; i < dev.length; i++) {
+ dev[i].onclick = deviceDetail;
+ dev[i].onmouseover = deviceHover;
+ dev[i].onmouseout = deviceUnhover;
+ }
+ var dev = dmesg.getElementsByClassName("srccall");
+ for (var i = 0; i < dev.length; i++)
+ dev[i].onclick = callSelect;
+ zoomTimeline();
+ });
+</script> """
hf.write(script_code);
# Function: executeSuspend
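The addScriptCode() rewrite above swaps the long chain of individually escaped and concatenated string literals for a single raw triple-quoted block written out with hf.write(). Since raw strings leave backslashes untouched, the embedded JavaScript no longer needs its escapes doubled. A minimal sketch of the point, with illustrative values that are not taken from sleepgraph itself:

    escaped = ' var text = log.innerHTML.split("\\n");'   # ordinary string: the backslash must be doubled
    raw     = r' var text = log.innerHTML.split("\n");'   # raw string: the JavaScript reads verbatim
    assert escaped == raw  # identical contents, without the extra escaping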
@@ -5524,7 +5525,9 @@ def executeSuspend(quiet=False):
if ((mode == 'freeze') or (sv.memmode == 's2idle')) \
and sv.haveTurbostat():
# execution will pause here
- turbo = sv.turbostat(s0ixready)
+ retval, turbo = sv.turbostat(s0ixready)
+ if retval != 0:
+ tdata['error'] = 'turbostat returned %d' % retval
if turbo:
tdata['turbo'] = turbo
else:
@@ -5532,6 +5535,7 @@ def executeSuspend(quiet=False):
pf.write(mode)
# execution will pause here
try:
+ pf.flush()
pf.close()
except Exception as e:
tdata['error'] = str(e)
@@ -5633,7 +5637,7 @@ def deviceInfo(output=''):
tgtval = 'runtime_status'
lines = dict()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
- if(not re.match('.*/power', dirname) or
+ if(not re.match(r'.*/power', dirname) or
'control' not in filenames or
tgtval not in filenames):
continue
@@ -5702,6 +5706,40 @@ def getModes():
fp.close()
return modes
+def dmidecode_backup(out, fatal=False):
+ cpath, spath, info = '/proc/cpuinfo', '/sys/class/dmi/id', {
+ 'bios-vendor': 'bios_vendor',
+ 'bios-version': 'bios_version',
+ 'bios-release-date': 'bios_date',
+ 'system-manufacturer': 'sys_vendor',
+ 'system-product-name': 'product_name',
+ 'system-version': 'product_version',
+ 'system-serial-number': 'product_serial',
+ 'baseboard-manufacturer': 'board_vendor',
+ 'baseboard-product-name': 'board_name',
+ 'baseboard-version': 'board_version',
+ 'baseboard-serial-number': 'board_serial',
+ 'chassis-manufacturer': 'chassis_vendor',
+ 'chassis-version': 'chassis_version',
+ 'chassis-serial-number': 'chassis_serial',
+ }
+ for key in info:
+ if key not in out:
+ val = sysvals.getVal(os.path.join(spath, info[key])).strip()
+ if val and val.lower() != 'to be filled by o.e.m.':
+ out[key] = val
+ if 'processor-version' not in out and os.path.exists(cpath):
+ with open(cpath, 'r') as fp:
+ for line in fp:
+ m = re.match(r'^model\s*name\s*\:\s*(?P<c>.*)', line)
+ if m:
+ out['processor-version'] = m.group('c').strip()
+ break
+ if fatal and len(out) < 1:
+ doError('dmidecode failed to get info from %s or %s' % \
+ (sysvals.mempath, spath))
+ return out
+
# Function: dmidecode
# Description:
# Read the bios tables and pull out system info
@@ -5712,6 +5750,8 @@ def getModes():
# A dict object with all available key/values
def dmidecode(mempath, fatal=False):
out = dict()
+ if(not (os.path.exists(mempath) and os.access(mempath, os.R_OK))):
+ return dmidecode_backup(out, fatal)
# the list of values to retrieve, with hardcoded (type, idx)
info = {
@@ -5727,24 +5767,14 @@ def dmidecode(mempath, fatal=False):
'baseboard-version': (2, 6),
'baseboard-serial-number': (2, 7),
'chassis-manufacturer': (3, 4),
- 'chassis-type': (3, 5),
'chassis-version': (3, 6),
'chassis-serial-number': (3, 7),
'processor-manufacturer': (4, 7),
'processor-version': (4, 16),
}
- if(not os.path.exists(mempath)):
- if(fatal):
- doError('file does not exist: %s' % mempath)
- return out
- if(not os.access(mempath, os.R_OK)):
- if(fatal):
- doError('file is not readable: %s' % mempath)
- return out
# by default use legacy scan, but try to use EFI first
- memaddr = 0xf0000
- memsize = 0x10000
+ memaddr, memsize = 0xf0000, 0x10000
for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
if not os.path.exists(ep) or not os.access(ep, os.R_OK):
continue
@@ -5765,11 +5795,7 @@ def dmidecode(mempath, fatal=False):
fp.seek(memaddr)
buf = fp.read(memsize)
except:
- if(fatal):
- doError('DMI table is unreachable, sorry')
- else:
- pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
- return out
+ return dmidecode_backup(out, fatal)
fp.close()
# search for either an SM table or DMI table
@@ -5785,10 +5811,7 @@ def dmidecode(mempath, fatal=False):
break
i += 16
if base == 0 and length == 0 and num == 0:
- if(fatal):
- doError('Neither SMBIOS nor DMI were found')
- else:
- return out
+ return dmidecode_backup(out, fatal)
# read in the SM or DMI table
try:
@@ -5796,11 +5819,7 @@ def dmidecode(mempath, fatal=False):
fp.seek(base)
buf = fp.read(length)
except:
- if(fatal):
- doError('DMI table is unreachable, sorry')
- else:
- pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
- return out
+ return dmidecode_backup(out, fatal)
fp.close()
# scan the table for the values we want
@@ -6272,7 +6291,10 @@ def find_in_html(html, start, end, firstonly=True):
return out
def data_from_html(file, outpath, issues, fulldetail=False):
- html = open(file, 'r').read()
+ try:
+ html = open(file, 'r').read()
+ except:
+ html = ascii(open(file, 'rb').read())
sysvals.htmlfile = os.path.relpath(file, outpath)
# extract general info
suspend = find_in_html(html, 'Kernel Suspend', 'ms')
@@ -6290,7 +6312,7 @@ def data_from_html(file, outpath, issues, fulldetail=False):
tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
if error:
- m = re.match('[a-z0-9]* failed in (?P<p>\S*).*', error)
+ m = re.match(r'[a-z0-9]* failed in (?P<p>\S*).*', error)
if m:
result = 'fail in %s' % m.group('p')
else:
@@ -6307,8 +6329,9 @@ def data_from_html(file, outpath, issues, fulldetail=False):
d.end = 999999999
d.dmesgtext = log.split('\n')
tp = d.extractErrorInfo()
- for msg in tp.msglist:
- sysvals.errorSummary(issues, msg)
+ if len(issues) < 100:
+ for msg in tp.msglist:
+ sysvals.errorSummary(issues, msg)
if stmp[2] == 'freeze':
extra = d.turbostatInfo()
elist = dict()
@@ -6325,6 +6348,11 @@ def data_from_html(file, outpath, issues, fulldetail=False):
line = find_in_html(log, '# netfix ', '\n')
if line:
extra['netfix'] = line
+ line = find_in_html(log, '# command ', '\n')
+ if line:
+ m = re.match(r'.* -m (?P<m>\S*).*', line)
+ if m:
+ extra['fullmode'] = m.group('m')
low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
for lowstr in ['waking', '+']:
if not low:
@@ -6334,7 +6362,7 @@ def data_from_html(file, outpath, issues, fulldetail=False):
if lowstr == '+':
issue = 'S2LOOPx%d' % len(low.split('+'))
else:
- m = re.match('.*waking *(?P<n>[0-9]*) *times.*', low)
+ m = re.match(r'.*waking *(?P<n>[0-9]*) *times.*', low)
issue = 'S2WAKEx%s' % m.group('n') if m else 'S2WAKExNaN'
match = [i for i in issues if i['match'] == issue]
if len(match) > 0:
@@ -6352,10 +6380,10 @@ def data_from_html(file, outpath, issues, fulldetail=False):
# extract device info
devices = dict()
for line in html.split('\n'):
- m = re.match(' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
+ m = re.match(r' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
if not m or 'thread kth' in line or 'thread sec' in line:
continue
- m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
+ m = re.match(r'(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
if not m:
continue
name, time, phase = m.group('n'), m.group('t'), m.group('p')
@@ -6416,9 +6444,9 @@ def genHtml(subdir, force=False):
for filename in filenames:
file = os.path.join(dirname, filename)
if sysvals.usable(file):
- if(re.match('.*_dmesg.txt', filename)):
+ if(re.match(r'.*_dmesg.txt', filename)):
sysvals.dmesgfile = file
- elif(re.match('.*_ftrace.txt', filename)):
+ elif(re.match(r'.*_ftrace.txt', filename)):
sysvals.ftracefile = file
sysvals.setOutputFile()
if (sysvals.dmesgfile or sysvals.ftracefile) and sysvals.htmlfile and \
@@ -6441,7 +6469,7 @@ def runSummary(subdir, local=True, genhtml=False):
desc = {'host':[],'mode':[],'kernel':[]}
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
- if(not re.match('.*.html', filename)):
+ if(not re.match(r'.*.html', filename)):
continue
data = data_from_html(os.path.join(dirname, filename), outpath, issues)
if(not data):
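One behavioral note on the sleepgraph changes above: SystemValues.turbostat() now returns a (returncode, output) pair instead of a bare string, so a failed turbostat run can be reported separately from an empty measurement. A minimal sketch of the new calling convention, mirroring the executeSuspend() hunk; sv, s0ixready and tdata are the objects used elsewhere in the script, and the sample output value is made up:

    retval, turbo = sv.turbostat(s0ixready)
    # retval is turbostat's exit status; turbo is the '|'-joined KEY=VAL pairs,
    # e.g. 'CPU%c1=3.27|Pkg%pc10=88.43|SYS%LPI=85.95'
    if retval != 0:
        tdata['error'] = 'turbostat returned %d' % retval
    if turbo:
        tdata['turbo'] = turbo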
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 5899c27c2e2e..5127be34869e 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -16,7 +16,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.19";
+static const char *version_str = "v1.20";
static const int supported_api_ver = 3;
static struct isst_if_platform_info isst_platform_info;
diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
index 05efffbca3b7..e05561d00458 100644
--- a/tools/power/x86/intel-speed-select/isst-core.c
+++ b/tools/power/x86/intel-speed-select/isst-core.c
@@ -283,6 +283,8 @@ int isst_set_trl(struct isst_id *id, unsigned long long trl)
return 0;
}
+#define MSR_TRL_FREQ_MULTIPLIER 100
+
int isst_set_trl_from_current_tdp(struct isst_id *id, unsigned long long trl)
{
unsigned long long msr_trl;
@@ -310,6 +312,10 @@ int isst_set_trl_from_current_tdp(struct isst_id *id, unsigned long long trl)
for (i = 0; i < 8; ++i) {
unsigned long long _trl = trl[i];
+ /* The MSR value is always in 100 MHz units */
+ if (isst_get_disp_freq_multiplier() == 1)
+ _trl /= MSR_TRL_FREQ_MULTIPLIER;
+
msr_trl |= (_trl << (i * 8));
}
}
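The MSR_TRL_FREQ_MULTIPLIER change above accounts for the fact that each 8-bit field of the turbo-ratio-limit MSR counts in 100 MHz steps. When isst_get_disp_freq_multiplier() is 1, the supplied value is presumably expressed in MHz, so it is divided by 100 before being shifted into its byte: for example, 3800 becomes 38. When the display multiplier is not 1, the value is taken to already be in 100 MHz units and is packed unchanged.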
diff --git a/tools/rcu/rcu-updaters.sh b/tools/rcu/rcu-updaters.sh
new file mode 100755
index 000000000000..4ef1397927bb
--- /dev/null
+++ b/tools/rcu/rcu-updaters.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Run bpftrace to obtain a histogram of the types of primitives used to
+# initiate RCU grace periods. The count associated with rcu_gp_init()
+# is the number of normal (non-expedited) grace periods.
+#
+# Usage: rcu-updaters.sh [ duration-in-seconds ]
+#
+# Note that not all kernel builds have all of these functions. On those
+# that do not, this script will issue a diagnostic for each function that is
+# not found, but will continue normally for the rest.
+
+duration=${1}
+if test -n "${duration}"
+then
+ exitclause='interval:s:'"${duration}"' { exit(); }'
+else
+ echo 'Hit control-C to end sample and print results.'
+fi
+bpftrace -e 'kprobe:kvfree_call_rcu,
+ kprobe:call_rcu,
+ kprobe:call_rcu_tasks,
+ kprobe:call_rcu_tasks_rude,
+ kprobe:call_rcu_tasks_trace,
+ kprobe:call_srcu,
+ kprobe:rcu_barrier,
+ kprobe:rcu_barrier_tasks,
+ kprobe:rcu_barrier_tasks_rude,
+ kprobe:rcu_barrier_tasks_trace,
+ kprobe:srcu_barrier,
+ kprobe:synchronize_rcu,
+ kprobe:synchronize_rcu_expedited,
+ kprobe:synchronize_rcu_tasks,
+ kprobe:synchronize_rcu_tasks_rude,
+ kprobe:synchronize_rcu_tasks_trace,
+ kprobe:synchronize_srcu,
+ kprobe:synchronize_srcu_expedited,
+ kprobe:get_state_synchronize_rcu,
+ kprobe:get_state_synchronize_rcu_full,
+ kprobe:start_poll_synchronize_rcu,
+ kprobe:start_poll_synchronize_rcu_expedited,
+ kprobe:start_poll_synchronize_rcu_full,
+ kprobe:start_poll_synchronize_rcu_expedited_full,
+ kprobe:poll_state_synchronize_rcu,
+ kprobe:poll_state_synchronize_rcu_full,
+ kprobe:cond_synchronize_rcu,
+ kprobe:cond_synchronize_rcu_full,
+ kprobe:start_poll_synchronize_srcu,
+ kprobe:poll_state_synchronize_srcu,
+ kprobe:rcu_gp_init
+ { @counts[func] = count(); } '"${exitclause}"
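As the header comment notes, the duration argument is optional: for example, running "tools/rcu/rcu-updaters.sh 30" samples for 30 seconds and then prints the per-function counts, while running it with no argument keeps sampling until interrupted with control-C. Kernels that lack some of the probed functions simply cause a bpftrace diagnostic for each missing probe; the counts for the remaining functions are still reported.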
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
index 57bf2688edfd..67503089e6a0 100644
--- a/tools/testing/memblock/tests/basic_api.c
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -15,12 +15,12 @@ static int memblock_initialization_check(void)
PREFIX_PUSH();
ASSERT_NE(memblock.memory.regions, NULL);
- ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.cnt, 0);
ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);
ASSERT_NE(memblock.reserved.regions, NULL);
- ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.cnt, 0);
ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);
@@ -982,6 +982,262 @@ static int memblock_reserve_many_check(void)
return 0;
}
+
+/*
+ * A test that tries to reserve the 129th memory block at all locations.
+ * Expect to trigger memblock_double_array() to double the
+ * memblock.memory.max and find a new valid memory range to use as reserved.regions.
+ *
+ * 0 1 2 128
+ * +-------+ +-------+ +-------+ +-------+
+ * | 32K | | 32K | | 32K | ... | 32K |
+ * +-------+-------+-------+-------+-------+ +-------+
+ * |<-32K->| |<-32K->|
+ *
+ */
+/* Keep the gap so these memory regions will not be merged. */
+#define MEMORY_BASE(idx) (SZ_128K + (MEM_SIZE * 2) * (idx))
+static int memblock_reserve_all_locations_check(void)
+{
+ int i, skip;
+ void *orig_region;
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_16K,
+ };
+ phys_addr_t new_reserved_regions_size;
+
+ PREFIX_PUSH();
+
+ /* Reserve the 129th memory block at all possible positions. */
+ for (skip = 0; skip < INIT_MEMBLOCK_REGIONS + 1; skip++) {
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ /* Add a valid memory region used by double_array(). */
+ dummy_physical_memory_init();
+ memblock_add(dummy_physical_memory_base(), MEM_SIZE);
+
+ for (i = 0; i < INIT_MEMBLOCK_REGIONS + 1; i++) {
+ if (i == skip)
+ continue;
+
+ /* Reserve some fake memory regions to fill up the memblock. */
+ memblock_reserve(MEMORY_BASE(i), MEM_SIZE);
+
+ if (i < skip) {
+ ASSERT_EQ(memblock.reserved.cnt, i + 1);
+ ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);
+ } else {
+ ASSERT_EQ(memblock.reserved.cnt, i);
+ ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
+ }
+ }
+
+ orig_region = memblock.reserved.regions;
+
+ /* This reserves the 129th memory region and triggers the array doubling. */
+ memblock_reserve(MEMORY_BASE(skip), MEM_SIZE);
+
+ /*
+ * This is the size of the memory region used by the doubled reserved.regions
+ * array; that range has been reserved because it is now in use. The size is
+ * used to calculate the total_size that memblock.reserved holds now.
+ */
+ new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+ sizeof(struct memblock_region));
+ /*
+ * memblock_double_array() finds a free memory region to hold the new
+ * reserved.regions array and reserves the range it occupies, so one more
+ * region exists in the reserved memblock. That extra region's size is
+ * new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+ * Now that memblock_double_array() has run, check that a subsequent
+ * memblock_reserve() still works as expected.
+ */
+ memblock_reserve(r.base, r.size);
+ ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+ ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size +
+ r.size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ dummy_physical_memory_cleanup();
+
+ /*
+ * The current reserved.regions array occupies a range of memory that was
+ * allocated by dummy_physical_memory_init(). After that memory is freed we
+ * must not use it, so restore the original regions array to make sure the
+ * remaining tests run as normal, unaffected by the doubled array.
+ */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+ }
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to reserve the 129th memory block at all possible
+ * locations. Expect to trigger memblock_double_array() to double
+ * memblock.reserved.max and find a new valid memory range for
+ * reserved.regions, while making sure it doesn't conflict with the range we
+ * want to reserve.
+ *
+ * For example, reserved already holds 128 regions and we now want to reserve
+ * the skipped one. Since reserved is full, memblock_double_array() has to
+ * find an available range in memory for the new array. We deliberately put
+ * two ranges in memory, one of which is exactly the skipped range. Before
+ * commit 48c3b583bbdd ("mm/memblock: fix overlapping allocation when doubling
+ * reserved array"), the new array would sit in the skipped range, which is a
+ * conflict. The new array is expected to be allocated from memory.regions[0].
+ *
+ * 0 1
+ * memory +-------+ +-------+
+ * | 32K | | 32K |
+ * +-------+ ------+-------+-------+-------+
+ * |<-32K->|<-32K->|<-32K->|
+ *
+ * 0 skipped 127
+ * reserved +-------+ ......... +-------+
+ * | 32K | . 32K . ... | 32K |
+ * +-------+-------+-------+ +-------+
+ * |<-32K->|
+ * ^
+ * |
+ * |
+ * skipped one
+ */
+/* Keep the gap so these memory regions will not be merged. */
+#define MEMORY_BASE_OFFSET(idx, offset) ((offset) + (MEM_SIZE * 2) * (idx))
+static int memblock_reserve_many_may_conflict_check(void)
+{
+ int i, skip;
+ void *orig_region;
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_16K,
+ };
+ phys_addr_t new_reserved_regions_size;
+
+ /*
+ * 0 1 129
+ * +---+ +---+ +---+
+ * |32K| |32K| .. |32K|
+ * +---+ +---+ +---+
+ *
+ * Pre-allocate the ranges for the 129 memory blocks plus one range at idx 0
+ * for the doubled memblock.reserved.regions array.
+ */
+ dummy_physical_memory_init();
+ phys_addr_t memory_base = dummy_physical_memory_base();
+ phys_addr_t offset = PAGE_ALIGN(memory_base);
+
+ PREFIX_PUSH();
+
+ /* Reserve the 129th memory block at all possible positions */
+ for (skip = 1; skip <= INIT_MEMBLOCK_REGIONS + 1; skip++) {
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ reset_memblock_attributes();
+ /* Add a valid memory region used by double_array(). */
+ memblock_add(MEMORY_BASE_OFFSET(0, offset), MEM_SIZE);
+ /*
+ * Add a memory region which will be reserved as the 129th memory
+ * region. This is not expected to be used by double_array().
+ */
+ memblock_add(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);
+
+ for (i = 1; i <= INIT_MEMBLOCK_REGIONS + 1; i++) {
+ if (i == skip)
+ continue;
+
+ /* Reserve some fake memory regions to fill up the memblock. */
+ memblock_reserve(MEMORY_BASE_OFFSET(i, offset), MEM_SIZE);
+
+ if (i < skip) {
+ ASSERT_EQ(memblock.reserved.cnt, i);
+ ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
+ } else {
+ ASSERT_EQ(memblock.reserved.cnt, i - 1);
+ ASSERT_EQ(memblock.reserved.total_size, (i - 1) * MEM_SIZE);
+ }
+ }
+
+ orig_region = memblock.reserved.regions;
+
+ /* This reserves the 129th memory region and triggers the array doubling. */
+ memblock_reserve(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);
+
+ /*
+ * This is the size of the memory region used by the doubled reserved.regions
+ * array; that range has been reserved because it is now in use. The size is
+ * used to calculate the total_size that memblock.reserved holds now.
+ */
+ new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+ sizeof(struct memblock_region));
+ /*
+ * memblock_double_array() finds a free memory region to hold the new
+ * reserved.regions array and reserves the range it occupies, so one more
+ * region exists in the reserved memblock. That extra region's size is
+ * new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+ * The first reserved region is the one allocated for the doubled array,
+ * with size new_reserved_regions_size and base
+ * MEMORY_BASE_OFFSET(0, offset) + SZ_32K - new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.regions[0].base + memblock.reserved.regions[0].size,
+ MEMORY_BASE_OFFSET(0, offset) + SZ_32K);
+ ASSERT_EQ(memblock.reserved.regions[0].size, new_reserved_regions_size);
+
+ /*
+ * Now that memblock_double_array() has run, check that a subsequent
+ * memblock_reserve() still works as expected.
+ */
+ memblock_reserve(r.base, r.size);
+ ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+ ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size +
+ r.size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+ * The current reserved.regions array occupies a range of memory that was
+ * allocated by dummy_physical_memory_init(). After that memory is freed we
+ * must not use it, so restore the original regions array to make sure the
+ * remaining tests run as normal, unaffected by the doubled array.
+ */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+ }
+
+ dummy_physical_memory_cleanup();
+
+ test_pass_pop();
+
+ return 0;
+}
+
static int memblock_reserve_checks(void)
{
prefix_reset();
@@ -997,6 +1253,8 @@ static int memblock_reserve_checks(void)
memblock_reserve_between_check();
memblock_reserve_near_max_check();
memblock_reserve_many_check();
+ memblock_reserve_all_locations_check();
+ memblock_reserve_many_may_conflict_check();
prefix_pop();
@@ -1295,7 +1553,7 @@ static int memblock_remove_only_region_check(void)
ASSERT_EQ(rgn->base, 0);
ASSERT_EQ(rgn->size, 0);
- ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.cnt, 0);
ASSERT_EQ(memblock.memory.total_size, 0);
test_pass_pop();
@@ -1723,7 +1981,7 @@ static int memblock_free_only_region_check(void)
ASSERT_EQ(rgn->base, 0);
ASSERT_EQ(rgn->size, 0);
- ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.cnt, 0);
ASSERT_EQ(memblock.reserved.total_size, 0);
test_pass_pop();
@@ -2129,6 +2387,53 @@ static int memblock_trim_memory_checks(void)
return 0;
}
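+
+/*
+ * A test that checks memblock_overlaps_region() against a single registered
+ * region: ranges that are far away, adjacent, partially overlapping and
+ * fully overlapping the region.
+ */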
+static int memblock_overlaps_region_check(void)
+{
+ struct region r = {
+ .base = SZ_1G,
+ .size = SZ_4M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r.base, r.size);
+
+ /* Far Away */
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1M, SZ_1M));
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_2G, SZ_1M));
+
+ /* Neighbor */
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_1M));
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_4M, SZ_1M));
+
+ /* Partial Overlap */
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_2M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_2M, SZ_2M));
+
+ /* Totally Overlap */
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G, SZ_4M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_2M, SZ_8M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_1M, SZ_1M));
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_overlaps_region_checks(void)
+{
+ prefix_reset();
+ prefix_push("memblock_overlaps_region");
+ test_print("Running memblock_overlaps_region tests...\n");
+
+ memblock_overlaps_region_check();
+
+ prefix_pop();
+
+ return 0;
+}
+
int memblock_basic_checks(void)
{
memblock_initialization_check();
@@ -2138,6 +2443,7 @@ int memblock_basic_checks(void)
memblock_free_checks();
memblock_bottom_up_checks();
memblock_trim_memory_checks();
+ memblock_overlaps_region_checks();
return 0;
}
diff --git a/tools/testing/memblock/tests/common.c b/tools/testing/memblock/tests/common.c
index f43b6f414983..3250c8e5124b 100644
--- a/tools/testing/memblock/tests/common.c
+++ b/tools/testing/memblock/tests/common.c
@@ -40,13 +40,13 @@ void reset_memblock_regions(void)
{
memset(memblock.memory.regions, 0,
memblock.memory.cnt * sizeof(struct memblock_region));
- memblock.memory.cnt = 1;
+ memblock.memory.cnt = 0;
memblock.memory.max = INIT_MEMBLOCK_REGIONS;
memblock.memory.total_size = 0;
memset(memblock.reserved.regions, 0,
memblock.reserved.cnt * sizeof(struct memblock_region));
- memblock.reserved.cnt = 1;
+ memblock.reserved.cnt = 0;
memblock.reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS;
memblock.reserved.total_size = 0;
}
@@ -61,7 +61,7 @@ void reset_memblock_attributes(void)
static inline void fill_memblock(void)
{
- memset(memory_block.base, 1, MEM_SIZE);
+ memset(memory_block.base, 1, PHYS_MEM_SIZE);
}
void setup_memblock(void)
@@ -103,7 +103,7 @@ void setup_numa_memblock(const unsigned int node_fracs[])
void dummy_physical_memory_init(void)
{
- memory_block.base = malloc(MEM_SIZE);
+ memory_block.base = malloc(PHYS_MEM_SIZE);
assert(memory_block.base);
fill_memblock();
}
diff --git a/tools/testing/memblock/tests/common.h b/tools/testing/memblock/tests/common.h
index b5ec59aa62d7..e1138e06c903 100644
--- a/tools/testing/memblock/tests/common.h
+++ b/tools/testing/memblock/tests/common.h
@@ -12,6 +12,7 @@
#include <../selftests/kselftest.h>
#define MEM_SIZE SZ_32K
+#define PHYS_MEM_SIZE SZ_16M
#define NUMA_NODES 8
#define INIT_MEMBLOCK_REGIONS 128
@@ -39,6 +40,9 @@ enum test_flags {
assert((_expected) == (_seen)); \
} while (0)
+#define ASSERT_TRUE(_seen) ASSERT_EQ(true, _seen)
+#define ASSERT_FALSE(_seen) ASSERT_EQ(false, _seen)
+
/**
* ASSERT_NE():
* Check the condition
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index ea956082e6a4..e4313726fae3 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -407,4 +407,5 @@ union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *g
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
+MODULE_DESCRIPTION("NVDIMM unit test");
MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
index b438f3d053ee..892e990c034a 100644
--- a/tools/testing/nvdimm/test/ndtest.c
+++ b/tools/testing/nvdimm/test/ndtest.c
@@ -987,5 +987,6 @@ static __exit void ndtest_exit(void)
module_init(ndtest_init);
module_exit(ndtest_exit);
+MODULE_DESCRIPTION("Test non-NFIT devices");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index a61df347a33d..cfd4378e2129 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -3382,5 +3382,6 @@ static __exit void nfit_test_exit(void)
module_init(nfit_test_init);
module_exit(nfit_test_exit);
+MODULE_DESCRIPTION("Test ACPI NFIT devices");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index ca24f6839d50..84b8c3c92c79 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -424,6 +424,7 @@ void idr_checks(void)
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
+#define MODULE_DESCRIPTION(X)
#define MODULE_LICENSE(x)
#define dump_stack() assert(0)
void ida_dump(struct ida *);
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index f1caf4bcf937..cd1cf05503b4 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -19,6 +19,7 @@
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
+#define MODULE_DESCRIPTION(X)
#define MODULE_LICENSE(x)
#define dump_stack() assert(0)
diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c
index f20e12cbbfd4..d0e53bff1eb6 100644
--- a/tools/testing/radix-tree/xarray.c
+++ b/tools/testing/radix-tree/xarray.c
@@ -10,6 +10,7 @@
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
+#define MODULE_DESCRIPTION(X)
#define MODULE_LICENSE(x)
#define dump_stack() assert(0)
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 9039f3709aff..bc8fe9e8f7f2 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -13,7 +13,8 @@ TARGETS += core
TARGETS += cpufreq
TARGETS += cpu-hotplug
TARGETS += damon
-TARGETS += devices
+TARGETS += devices/error_logs
+TARGETS += devices/probe
TARGETS += dmabuf-heaps
TARGETS += drivers/dma-buf
TARGETS += drivers/s390x/uvdevice
@@ -21,6 +22,7 @@ TARGETS += drivers/net
TARGETS += drivers/net/bonding
TARGETS += drivers/net/team
TARGETS += drivers/net/virtio_net
+TARGETS += drivers/platform/x86/intel/ifs
TARGETS += dt
TARGETS += efivarfs
TARGETS += exec
@@ -251,6 +253,7 @@ ifdef INSTALL_PATH
install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/ktap_helpers.sh $(INSTALL_PATH)/kselftest/
+ install -m 744 kselftest/ksft.py $(INSTALL_PATH)/kselftest/
install -m 744 run_kselftest.sh $(INSTALL_PATH)/
rm -f $(TEST_LIST)
@ret=1; \
diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
index 1c04e5f638a0..2a4b2662035e 100644
--- a/tools/testing/selftests/alsa/mixer-test.c
+++ b/tools/testing/selftests/alsa/mixer-test.c
@@ -33,6 +33,8 @@
struct card_data {
snd_ctl_t *handle;
int card;
+ snd_ctl_card_info_t *info;
+ const char *card_name;
struct pollfd pollfd;
int num_ctls;
snd_ctl_elem_list_t *ctls;
@@ -91,8 +93,26 @@ static void find_controls(void)
err = snd_card_get_longname(card, &card_longname);
if (err != 0)
card_longname = "Unknown";
- ksft_print_msg("Card %d - %s (%s)\n", card,
- card_name, card_longname);
+
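+ /* Look up the card's ID string; it is used instead of the card number in the test names below. */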
+ err = snd_ctl_card_info_malloc(&card_data->info);
+ if (err != 0)
+ ksft_exit_fail_msg("Failed to allocate card info: %d\n",
+ err);
+
+ err = snd_ctl_card_info(card_data->handle, card_data->info);
+ if (err == 0) {
+ card_data->card_name = snd_ctl_card_info_get_id(card_data->info);
+ if (!card_data->card_name)
+ ksft_print_msg("Failed to get card ID\n");
+ } else {
+ ksft_print_msg("Failed to get card info: %d\n", err);
+ }
+
+ if (!card_data->card_name)
+ card_data->card_name = "Unknown";
+
+ ksft_print_msg("Card %d/%s - %s (%s)\n", card,
+ card_data->card_name, card_name, card_longname);
/* Count controls */
snd_ctl_elem_list_malloc(&card_data->ctls);
@@ -389,16 +409,16 @@ static void test_ctl_get_value(struct ctl_data *ctl)
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
ksft_print_msg("%s is inactive\n", ctl->name);
- ksft_test_result_skip("get_value.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
/* Can't test reading on an unreadable control */
if (!snd_ctl_elem_info_is_readable(ctl->info)) {
ksft_print_msg("%s is not readable\n", ctl->name);
- ksft_test_result_skip("get_value.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
@@ -413,8 +433,8 @@ static void test_ctl_get_value(struct ctl_data *ctl)
err = -EINVAL;
out:
- ksft_test_result(err >= 0, "get_value.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(err >= 0, "get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static bool strend(const char *haystack, const char *needle)
@@ -431,7 +451,7 @@ static void test_ctl_name(struct ctl_data *ctl)
{
bool name_ok = true;
- ksft_print_msg("%d.%d %s\n", ctl->card->card, ctl->elem,
+ ksft_print_msg("%s.%d %s\n", ctl->card->card_name, ctl->elem,
ctl->name);
/* Only boolean controls should end in Switch */
@@ -453,8 +473,8 @@ static void test_ctl_name(struct ctl_data *ctl)
}
}
- ksft_test_result(name_ok, "name.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(name_ok, "name.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static void show_values(struct ctl_data *ctl, snd_ctl_elem_value_t *orig_val,
@@ -626,28 +646,41 @@ static int write_and_verify(struct ctl_data *ctl,
}
/*
+ * We can't verify any specific value for volatile controls,
+ * but we should still check that whatever we read is a valid
+ * value for the control.
+ */
+ if (snd_ctl_elem_info_is_volatile(ctl->info)) {
+ if (!ctl_value_valid(ctl, read_val)) {
+ ksft_print_msg("Volatile control %s has invalid value\n",
+ ctl->name);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /*
* Check for an event if the value changed, or confirm that
* there was none if it didn't. We rely on the kernel
* generating the notification before it returns from the
* write, this is currently true, should that ever change this
* will most likely break and need updating.
*/
- if (!snd_ctl_elem_info_is_volatile(ctl->info)) {
- err = wait_for_event(ctl, 0);
- if (snd_ctl_elem_value_compare(initial_val, read_val)) {
- if (err < 1) {
- ksft_print_msg("No event generated for %s\n",
- ctl->name);
- show_values(ctl, initial_val, read_val);
- ctl->event_missing++;
- }
- } else {
- if (err != 0) {
- ksft_print_msg("Spurious event generated for %s\n",
- ctl->name);
- show_values(ctl, initial_val, read_val);
- ctl->event_spurious++;
- }
+ err = wait_for_event(ctl, 0);
+ if (snd_ctl_elem_value_compare(initial_val, read_val)) {
+ if (err < 1) {
+ ksft_print_msg("No event generated for %s\n",
+ ctl->name);
+ show_values(ctl, initial_val, read_val);
+ ctl->event_missing++;
+ }
+ } else {
+ if (err != 0) {
+ ksft_print_msg("Spurious event generated for %s\n",
+ ctl->name);
+ show_values(ctl, initial_val, read_val);
+ ctl->event_spurious++;
}
}
@@ -682,30 +715,30 @@ static void test_ctl_write_default(struct ctl_data *ctl)
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
ksft_print_msg("%s is inactive\n", ctl->name);
- ksft_test_result_skip("write_default.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
if (!snd_ctl_elem_info_is_writable(ctl->info)) {
ksft_print_msg("%s is not writeable\n", ctl->name);
- ksft_test_result_skip("write_default.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
/* No idea what the default was for unreadable controls */
if (!snd_ctl_elem_info_is_readable(ctl->info)) {
ksft_print_msg("%s couldn't read default\n", ctl->name);
- ksft_test_result_skip("write_default.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
err = write_and_verify(ctl, ctl->def_val, NULL);
- ksft_test_result(err >= 0, "write_default.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(err >= 0, "write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static bool test_ctl_write_valid_boolean(struct ctl_data *ctl)
@@ -815,15 +848,15 @@ static void test_ctl_write_valid(struct ctl_data *ctl)
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
ksft_print_msg("%s is inactive\n", ctl->name);
- ksft_test_result_skip("write_valid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
if (!snd_ctl_elem_info_is_writable(ctl->info)) {
ksft_print_msg("%s is not writeable\n", ctl->name);
- ksft_test_result_skip("write_valid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
@@ -846,16 +879,16 @@ static void test_ctl_write_valid(struct ctl_data *ctl)
default:
/* No tests for this yet */
- ksft_test_result_skip("write_valid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
/* Restore the default value to minimise disruption */
write_and_verify(ctl, ctl->def_val, NULL);
- ksft_test_result(pass, "write_valid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(pass, "write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static bool test_ctl_write_invalid_value(struct ctl_data *ctl,
@@ -1027,15 +1060,15 @@ static void test_ctl_write_invalid(struct ctl_data *ctl)
/* If the control is turned off let's be polite */
if (snd_ctl_elem_info_is_inactive(ctl->info)) {
ksft_print_msg("%s is inactive\n", ctl->name);
- ksft_test_result_skip("write_invalid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
if (!snd_ctl_elem_info_is_writable(ctl->info)) {
ksft_print_msg("%s is not writeable\n", ctl->name);
- ksft_test_result_skip("write_invalid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
@@ -1058,28 +1091,28 @@ static void test_ctl_write_invalid(struct ctl_data *ctl)
default:
/* No tests for this yet */
- ksft_test_result_skip("write_invalid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
return;
}
/* Restore the default value to minimise disruption */
write_and_verify(ctl, ctl->def_val, NULL);
- ksft_test_result(pass, "write_invalid.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(pass, "write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static void test_ctl_event_missing(struct ctl_data *ctl)
{
- ksft_test_result(!ctl->event_missing, "event_missing.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(!ctl->event_missing, "event_missing.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
static void test_ctl_event_spurious(struct ctl_data *ctl)
{
- ksft_test_result(!ctl->event_spurious, "event_spurious.%d.%d\n",
- ctl->card->card, ctl->elem);
+ ksft_test_result(!ctl->event_spurious, "event_spurious.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
}
int main(void)
diff --git a/tools/testing/selftests/alsa/pcm-test.c b/tools/testing/selftests/alsa/pcm-test.c
index de664dedb541..dbd7c222ce93 100644
--- a/tools/testing/selftests/alsa/pcm-test.c
+++ b/tools/testing/selftests/alsa/pcm-test.c
@@ -24,6 +24,8 @@ typedef struct timespec timestamp_t;
struct card_data {
int card;
+ snd_ctl_card_info_t *info;
+ const char *name;
pthread_t thread;
struct card_data *next;
};
@@ -35,6 +37,7 @@ struct pcm_data {
int card;
int device;
int subdevice;
+ const char *card_name;
snd_pcm_stream_t stream;
snd_config_t *pcm_config;
struct pcm_data *next;
@@ -167,6 +170,10 @@ static void find_pcms(void)
config = get_alsalib_config();
while (card >= 0) {
+ card_data = calloc(1, sizeof(*card_data));
+ if (!card_data)
+ ksft_exit_fail_msg("Out of memory\n");
+
sprintf(name, "hw:%d", card);
err = snd_ctl_open_lconf(&handle, name, 0, config);
@@ -182,14 +189,29 @@ static void find_pcms(void)
err = snd_card_get_longname(card, &card_longname);
if (err != 0)
card_longname = "Unknown";
- ksft_print_msg("Card %d - %s (%s)\n", card,
- card_name, card_longname);
+
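+ /* Look up the card's ID string; it replaces the card number in the test names printed below. */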
+ err = snd_ctl_card_info_malloc(&card_data->info);
+ if (err != 0)
+ ksft_exit_fail_msg("Failed to allocate card info: %d\n",
+ err);
+
+ err = snd_ctl_card_info(handle, card_data->info);
+ if (err == 0) {
+ card_data->name = snd_ctl_card_info_get_id(card_data->info);
+ if (!card_data->name)
+ ksft_print_msg("Failed to get card ID\n");
+ } else {
+ ksft_print_msg("Failed to get card info: %d\n", err);
+ }
+
+ if (!card_data->name)
+ card_data->name = "Unknown";
+
+ ksft_print_msg("Card %d/%s - %s (%s)\n", card,
+ card_data->name, card_name, card_longname);
card_config = conf_by_card(card);
- card_data = calloc(1, sizeof(*card_data));
- if (!card_data)
- ksft_exit_fail_msg("Out of memory\n");
card_data->card = card;
card_data->next = card_list;
card_list = card_data;
@@ -218,6 +240,10 @@ static void find_pcms(void)
if (err < 0)
ksft_exit_fail_msg("snd_ctl_pcm_info: %d:%d:%d\n",
dev, 0, stream);
+
+ ksft_print_msg("%s.0 - %s\n", card_data->name,
+ snd_pcm_info_get_id(pcm_info));
+
count = snd_pcm_info_get_subdevices_count(pcm_info);
for (subdev = 0; subdev < count; subdev++) {
sprintf(key, "pcm.%d.%d.%s", dev, subdev, snd_pcm_stream_name(stream));
@@ -232,6 +258,7 @@ static void find_pcms(void)
pcm_data->card = card;
pcm_data->device = dev;
pcm_data->subdevice = subdev;
+ pcm_data->card_name = card_data->name;
pcm_data->stream = stream;
pcm_data->pcm_config = conf_get_subtree(card_config, key, NULL);
pcm_data->next = pcm_list;
@@ -294,9 +321,9 @@ static void test_pcm_time(struct pcm_data *data, enum test_class class,
desc = conf_get_string(pcm_cfg, "description", NULL, NULL);
if (desc)
- ksft_print_msg("%s.%s.%d.%d.%d.%s - %s\n",
+ ksft_print_msg("%s.%s.%s.%d.%d.%s - %s\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device, data->subdevice,
snd_pcm_stream_name(data->stream),
desc);
@@ -352,9 +379,9 @@ __format:
old_format = format;
format = snd_pcm_format_value(alt_formats[i]);
if (format != SND_PCM_FORMAT_UNKNOWN) {
- ksft_print_msg("%s.%d.%d.%d.%s.%s format %s -> %s\n",
+ ksft_print_msg("%s.%s.%d.%d.%s.%s format %s -> %s\n",
test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device, data->subdevice,
snd_pcm_stream_name(data->stream),
snd_pcm_access_name(access),
snd_pcm_format_name(old_format),
@@ -383,7 +410,7 @@ __format:
goto __close;
}
if (rrate != rate) {
- snprintf(msg, sizeof(msg), "rate mismatch %ld != %d", rate, rrate);
+ snprintf(msg, sizeof(msg), "rate mismatch %ld != %u", rate, rrate);
goto __close;
}
rperiod_size = period_size;
@@ -430,9 +457,9 @@ __format:
goto __close;
}
- ksft_print_msg("%s.%s.%d.%d.%d.%s hw_params.%s.%s.%ld.%ld.%ld.%ld sw_params.%ld\n",
+ ksft_print_msg("%s.%s.%s.%d.%d.%s hw_params.%s.%s.%ld.%ld.%ld.%ld sw_params.%ld\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device, data->subdevice,
snd_pcm_stream_name(data->stream),
snd_pcm_access_name(access),
snd_pcm_format_name(format),
@@ -491,9 +518,10 @@ __close:
* Anything specified as specific to this system
* should always be supported.
*/
- ksft_test_result(!skip, "%s.%s.%d.%d.%d.%s.params\n",
+ ksft_test_result(!skip, "%s.%s.%s.%d.%d.%s.params\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device,
+ data->subdevice,
snd_pcm_stream_name(data->stream));
break;
default:
@@ -501,14 +529,16 @@ __close:
}
if (!skip)
- ksft_test_result(pass, "%s.%s.%d.%d.%d.%s\n",
+ ksft_test_result(pass, "%s.%s.%s.%d.%d.%s\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device,
+ data->subdevice,
snd_pcm_stream_name(data->stream));
else
- ksft_test_result_skip("%s.%s.%d.%d.%d.%s\n",
+ ksft_test_result_skip("%s.%s.%s.%d.%d.%s\n",
test_class_name, test_name,
- data->card, data->device, data->subdevice,
+ data->card_name, data->device,
+ data->subdevice,
snd_pcm_stream_name(data->stream));
if (msg[0])
@@ -609,8 +639,8 @@ int main(void)
conf->filename, conf->config_id);
for (pcm = pcm_missing; pcm != NULL; pcm = pcm->next) {
- ksft_test_result(false, "test.missing.%d.%d.%d.%s\n",
- pcm->card, pcm->device, pcm->subdevice,
+ ksft_test_result(false, "test.missing.%s.%d.%d.%s\n",
+ pcm->card_name, pcm->device, pcm->subdevice,
snd_pcm_stream_name(pcm->stream));
}
diff --git a/tools/testing/selftests/arm64/abi/ptrace.c b/tools/testing/selftests/arm64/abi/ptrace.c
index abe4d58d731d..4c941270d8de 100644
--- a/tools/testing/selftests/arm64/abi/ptrace.c
+++ b/tools/testing/selftests/arm64/abi/ptrace.c
@@ -47,7 +47,7 @@ static void test_tpidr(pid_t child)
/* ...write a new value.. */
write_iov.iov_len = sizeof(uint64_t);
- write_val[0] = read_val[0]++;
+ write_val[0] = read_val[0] + 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
ksft_test_result(ret == 0, "write_tpidr_one\n");
diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore
index 00e52c966281..8362e7ec35ad 100644
--- a/tools/testing/selftests/arm64/fp/.gitignore
+++ b/tools/testing/selftests/arm64/fp/.gitignore
@@ -2,6 +2,7 @@ fp-pidbench
fp-ptrace
fp-stress
fpsimd-test
+kernel-test
rdvl-sme
rdvl-sve
sve-probe-vls
diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile
index 55d4f00d9e8e..d171021e4cdd 100644
--- a/tools/testing/selftests/arm64/fp/Makefile
+++ b/tools/testing/selftests/arm64/fp/Makefile
@@ -12,6 +12,7 @@ TEST_GEN_PROGS := \
vec-syscfg \
za-fork za-ptrace
TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \
+ kernel-test \
rdvl-sme rdvl-sve \
sve-test \
ssve-test \
diff --git a/tools/testing/selftests/arm64/fp/fp-stress.c b/tools/testing/selftests/arm64/fp/fp-stress.c
index dd31647b00a2..faac24bdefeb 100644
--- a/tools/testing/selftests/arm64/fp/fp-stress.c
+++ b/tools/testing/selftests/arm64/fp/fp-stress.c
@@ -319,6 +319,19 @@ static void start_fpsimd(struct child_data *child, int cpu, int copy)
ksft_print_msg("Started %s\n", child->name);
}
+static void start_kernel(struct child_data *child, int cpu, int copy)
+{
+ int ret;
+
+ ret = asprintf(&child->name, "KERNEL-%d-%d", cpu, copy);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ child_start(child, "./kernel-test");
+
+ ksft_print_msg("Started %s\n", child->name);
+}
+
static void start_sve(struct child_data *child, int vl, int cpu)
{
int ret;
@@ -438,7 +451,7 @@ int main(int argc, char **argv)
int ret;
int timeout = 10;
int cpus, i, j, c;
- int sve_vl_count, sme_vl_count, fpsimd_per_cpu;
+ int sve_vl_count, sme_vl_count;
bool all_children_started = false;
int seen_children;
int sve_vls[MAX_VLS], sme_vls[MAX_VLS];
@@ -482,12 +495,7 @@ int main(int argc, char **argv)
have_sme2 = false;
}
- /* Force context switching if we only have FPSIMD */
- if (!sve_vl_count && !sme_vl_count)
- fpsimd_per_cpu = 2;
- else
- fpsimd_per_cpu = 1;
- tests += cpus * fpsimd_per_cpu;
+ tests += cpus * 2;
ksft_print_header();
ksft_set_plan(tests);
@@ -542,8 +550,8 @@ int main(int argc, char **argv)
tests);
for (i = 0; i < cpus; i++) {
- for (j = 0; j < fpsimd_per_cpu; j++)
- start_fpsimd(&children[num_children++], i, j);
+ start_fpsimd(&children[num_children++], i, 0);
+ start_kernel(&children[num_children++], i, 0);
for (j = 0; j < sve_vl_count; j++)
start_sve(&children[num_children++], sve_vls[j], i);
diff --git a/tools/testing/selftests/arm64/fp/kernel-test.c b/tools/testing/selftests/arm64/fp/kernel-test.c
new file mode 100644
index 000000000000..e8da3b4cbd23
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/kernel-test.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 ARM Limited.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+
+#include <linux/kernel.h>
+#include <linux/if_alg.h>
+
+#define DATA_SIZE (16 * 4096)
+
+static int base, sock;
+
+static int digest_len;
+static char *ref;
+static char *digest;
+static char *alg_name;
+
+static struct iovec data_iov;
+static int zerocopy[2];
+static int sigs;
+static int iter;
+
+static void handle_exit_signal(int sig, siginfo_t *info, void *context)
+{
+ printf("Terminated by signal %d, iterations=%d, signals=%d\n",
+ sig, iter, sigs);
+ exit(0);
+}
+
+static void handle_kick_signal(int sig, siginfo_t *info, void *context)
+{
+ sigs++;
+}
+
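+/*
+ * Hash drivers implemented with kernel mode floating point/SIMD;
+ * create_socket() accepts an algorithm only if its driver matches one of
+ * these entries in /proc/crypto.
+ */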
+static char *drivers[] = {
+ "crct10dif-arm64-ce",
+ /* "crct10dif-arm64-neon", - Same priority as generic */
+ "sha1-ce",
+ "sha224-arm64",
+ "sha224-arm64-neon",
+ "sha224-ce",
+ "sha256-arm64",
+ "sha256-arm64-neon",
+ "sha256-ce",
+ "sha384-ce",
+ "sha512-ce",
+ "sha3-224-ce",
+ "sha3-256-ce",
+ "sha3-384-ce",
+ "sha3-512-ce",
+ "sm3-ce",
+ "sm3-neon",
+};
+
+static bool create_socket(void)
+{
+ FILE *proc;
+ struct sockaddr_alg addr;
+ char buf[1024];
+ char *c, *driver_name;
+ bool is_shash, match;
+ int ret, i;
+
+ ret = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ if (ret < 0) {
+ if (errno == EAFNOSUPPORT) {
+ printf("AF_ALG not supported\n");
+ return false;
+ }
+
+ printf("Failed to create AF_ALG socket: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+ base = ret;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.salg_family = AF_ALG;
+ strncpy((char *)addr.salg_type, "hash", sizeof(addr.salg_type));
+
+ proc = fopen("/proc/crypto", "r");
+ if (!proc) {
+ printf("Unable to open /proc/crypto\n");
+ return false;
+ }
+
+ driver_name = NULL;
+ is_shash = false;
+ match = false;
+
+ /* Look through /proc/crypto for a driver with kernel mode FP usage */
+ while (!match) {
+ c = fgets(buf, sizeof(buf), proc);
+ if (!c) {
+ if (feof(proc)) {
+ printf("Nothing found in /proc/crypto\n");
+ return false;
+ }
+ continue;
+ }
+
+ /* Algorithm descriptions are separated by a blank line */
+ if (*c == '\n') {
+ if (is_shash && driver_name) {
+ for (i = 0; i < ARRAY_SIZE(drivers); i++) {
+ if (strcmp(drivers[i],
+ driver_name) == 0) {
+ match = true;
+ }
+ }
+ }
+
+ if (!match) {
+ digest_len = 0;
+
+ free(driver_name);
+ driver_name = NULL;
+
+ free(alg_name);
+ alg_name = NULL;
+
+ is_shash = false;
+ }
+ continue;
+ }
+
+ /* Remove trailing newline */
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ /* Find the field/value separator and start of the value */
+ c = strchr(buf, ':');
+ if (!c)
+ continue;
+ c += 2;
+
+ if (strncmp(buf, "digestsize", strlen("digestsize")) == 0)
+ sscanf(c, "%d", &digest_len);
+
+ if (strncmp(buf, "name", strlen("name")) == 0)
+ alg_name = strdup(c);
+
+ if (strncmp(buf, "driver", strlen("driver")) == 0)
+ driver_name = strdup(c);
+
+ if (strncmp(buf, "type", strlen("type")) == 0)
+ if (strncmp(c, "shash", strlen("shash")) == 0)
+ is_shash = true;
+ }
+
+ strncpy((char *)addr.salg_name, alg_name,
+ sizeof(addr.salg_name) - 1);
+
+ ret = bind(base, (struct sockaddr *)&addr, sizeof(addr));
+ if (ret < 0) {
+ printf("Failed to bind %s: %s (%d)\n",
+ addr.salg_name, strerror(errno), errno);
+ return false;
+ }
+
+ ret = accept(base, NULL, 0);
+ if (ret < 0) {
+ printf("Failed to accept %s: %s (%d)\n",
+ addr.salg_name, strerror(errno), errno);
+ return false;
+ }
+
+ sock = ret;
+
+ ret = pipe(zerocopy);
+ if (ret != 0) {
+ printf("Failed to create zerocopy pipe: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ ref = malloc(digest_len);
+ if (!ref) {
+ printf("Failed to allocated %d byte reference\n", digest_len);
+ return false;
+ }
+
+ digest = malloc(digest_len);
+ if (!digest) {
+ printf("Failed to allocated %d byte digest\n", digest_len);
+ return false;
+ }
+
+ return true;
+}
+
+static bool compute_digest(void *buf)
+{
+ struct iovec iov;
+ int ret, wrote;
+
+ iov = data_iov;
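+ /*
+ * Gift the buffer pages to the zerocopy pipe and splice them into the
+ * AF_ALG socket so the kernel computes the hash over the data.
+ */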
+ while (iov.iov_len) {
+ ret = vmsplice(zerocopy[1], &iov, 1, SPLICE_F_GIFT);
+ if (ret < 0) {
+ printf("Failed to send buffer: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ wrote = ret;
+ ret = splice(zerocopy[0], NULL, sock, NULL, wrote, 0);
+ if (ret < 0) {
+ printf("Failed to splice buffer: %s (%d)\n",
+ strerror(errno), errno);
+ } else if (ret != wrote) {
+ printf("Short splice: %d < %d\n", ret, wrote);
+ }
+
+ iov.iov_len -= wrote;
+ iov.iov_base += wrote;
+ }
+
+reread:
+ ret = recv(sock, buf, digest_len, 0);
+ if (ret == 0) {
+ printf("No digest returned\n");
+ return false;
+ }
+ if (ret != digest_len) {
+ if (errno == -EAGAIN)
+ goto reread;
+ printf("Failed to get digest: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ return true;
+}
+
+int main(void)
+{
+ char *data;
+ struct sigaction sa;
+ int ret;
+
+ /* Ensure we have unbuffered output */
+ setvbuf(stdout, NULL, _IOLBF, 0);
+
+ /* The parent will communicate with us via signals */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handle_exit_signal;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ ret = sigaction(SIGTERM, &sa, NULL);
+ if (ret < 0)
+ printf("Failed to install SIGTERM handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ sa.sa_sigaction = handle_kick_signal;
+ ret = sigaction(SIGUSR2, &sa, NULL);
+ if (ret < 0)
+ printf("Failed to install SIGUSR2 handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ data = malloc(DATA_SIZE);
+ if (!data) {
+ printf("Failed to allocate data buffer\n");
+ return EXIT_FAILURE;
+ }
+ memset(data, 0, DATA_SIZE);
+
+ data_iov.iov_base = data;
+ data_iov.iov_len = DATA_SIZE;
+
+ /*
+ * If we can't create a socket, assume the system lacks the required
+ * support and fall back to a basic FPSIMD test for the
+ * benefit of fp-stress.
+ */
+ if (!create_socket()) {
+ execl("./fpsimd-test", "./fpsimd-test", NULL);
+ printf("Failed to fall back to fspimd-test: %d (%s)\n",
+ errno, strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * Compute a reference digest that we hope is repeatable; we do
+ * this at runtime partly to make it easier to play with
+ * parameters.
+ */
+ if (!compute_digest(ref)) {
+ printf("Failed to compute reference digest\n");
+ return EXIT_FAILURE;
+ }
+
+ printf("AF_ALG using %s\n", alg_name);
+
+ while (true) {
+ if (!compute_digest(digest)) {
+ printf("Failed to compute digest, iter=%d\n", iter);
+ return EXIT_FAILURE;
+ }
+
+ if (memcmp(ref, digest, digest_len) != 0) {
+ printf("Digest mismatch, iter=%d\n", iter);
+ return EXIT_FAILURE;
+ }
+
+ iter++;
+ }
+
+ return EXIT_FAILURE;
+}
diff --git a/tools/testing/selftests/arm64/tags/Makefile b/tools/testing/selftests/arm64/tags/Makefile
index 6d29cfde43a2..0a77f35295fb 100644
--- a/tools/testing/selftests/arm64/tags/Makefile
+++ b/tools/testing/selftests/arm64/tags/Makefile
@@ -2,6 +2,5 @@
CFLAGS += $(KHDR_INCLUDES)
TEST_GEN_PROGS := tags_test
-TEST_PROGS := run_tags_test.sh
include ../../lib.mk
diff --git a/tools/testing/selftests/arm64/tags/run_tags_test.sh b/tools/testing/selftests/arm64/tags/run_tags_test.sh
deleted file mode 100755
index 745f11379930..000000000000
--- a/tools/testing/selftests/arm64/tags/run_tags_test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-echo "--------------------"
-echo "running tags test"
-echo "--------------------"
-./tags_test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
-else
- echo "[PASS]"
-fi
diff --git a/tools/testing/selftests/arm64/tags/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
index 955f87c1170d..8ae26e496c89 100644
--- a/tools/testing/selftests/arm64/tags/tags_test.c
+++ b/tools/testing/selftests/arm64/tags/tags_test.c
@@ -17,19 +17,21 @@ int main(void)
static int tbi_enabled = 0;
unsigned long tag = 0;
struct utsname *ptr;
- int err;
+
+ ksft_print_header();
+ ksft_set_plan(1);
if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
tbi_enabled = 1;
ptr = (struct utsname *)malloc(sizeof(*ptr));
if (!ptr)
- ksft_exit_fail_msg("Failed to allocate utsname buffer\n");
+ ksft_exit_fail_perror("Failed to allocate utsname buffer");
if (tbi_enabled)
tag = 0x42;
ptr = (struct utsname *)SET_TAG(ptr, tag);
- err = uname(ptr);
+ ksft_test_result(!uname(ptr), "Syscall successful with tagged address\n");
free(ptr);
- return err;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 0445ac38bc07..3c7c3e79aa93 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -6,6 +6,7 @@ kprobe_multi_test # needs CONFIG_FPROBE
module_attach # prog 'kprobe_multi': failed to auto-attach: -95
fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524
fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524
+tracing_struct/struct_many_args # struct_many_args:FAIL:tracing_struct_many_args__attach unexpected error: -524
fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index c34adf39eeb2..3ebd77206f98 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -1,9 +1,5 @@
# TEMPORARY
# Alphabetical order
-exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
verifier_iterating_callbacks
-verifier_arena # JIT does not support arena
-arena_htab # JIT does not support arena
-arena_atomics
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
index 567491f3e1b5..68a51dcc0669 100644
--- a/tools/testing/selftests/bpf/bpf_arena_common.h
+++ b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -34,10 +34,12 @@
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
#define __arena __attribute__((address_space(1)))
+#define __arena_global __attribute__((address_space(1)))
#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
#else
#define __arena
+#define __arena_global SEC(".addr_space.1")
#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
#endif
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 3d9e4b8c6b81..828556cdc2f0 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -163,7 +163,7 @@ struct bpf_iter_task_vma;
extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
struct task_struct *task,
- unsigned long addr) __ksym;
+ __u64 addr) __ksym;
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
@@ -351,6 +351,7 @@ l_true: \
l_continue:; \
})
#else
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop \
({ __label__ l_break, l_continue; \
bool ret = true; \
@@ -376,6 +377,33 @@ l_true: \
l_break: break; \
l_continue:; \
})
+#else
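+/*
+ * Big-endian variant of the same encoding: the 16-bit jump offset is packed
+ * into the upper half of the 32-bit word and the trailing .short is zero.
+ */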
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define cond_break \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: break; \
+ l_continue:; \
+ })
+#endif
#endif
#ifndef bpf_nop_mov
@@ -524,7 +552,7 @@ extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
- int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
+ int (callback_fn)(void *map, int *key, void *value),
unsigned int flags__k, void *aux__ign) __ksym;
#define bpf_wq_set_callback(timer, cb, flags) \
bpf_wq_set_callback_impl(timer, cb, flags, NULL)
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index be91a6919315..3b6675ab4086 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -77,5 +77,5 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
struct bpf_key *trusted_keyring) __ksym;
extern bool bpf_session_is_return(void) __ksym __weak;
-extern long *bpf_session_cookie(void) __ksym __weak;
+extern __u64 *bpf_session_cookie(void) __ksym __weak;
#endif
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
index b1dd889d5d7d..948eb3962732 100644
--- a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
+++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
@@ -22,12 +22,12 @@ static int dummy_init_member(const struct btf_type *t,
return 0;
}
-static int dummy_reg(void *kdata)
+static int dummy_reg(void *kdata, struct bpf_link *link)
{
return 0;
}
-static void dummy_unreg(void *kdata)
+static void dummy_unreg(void *kdata, struct bpf_link *link)
{
}
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 2a18bd320e92..fd28c1157bd3 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -18,6 +18,7 @@
#include <linux/in6.h>
#include <linux/un.h>
#include <net/sock.h>
+#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"
@@ -53,6 +54,13 @@ struct bpf_testmod_struct_arg_4 {
int b;
};
+struct bpf_testmod_struct_arg_5 {
+ char a;
+ short b;
+ int c;
+ long d;
+};
+
__bpf_hook_start();
noinline int
@@ -111,6 +119,15 @@ bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
}
noinline int
+bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
+ short g, struct bpf_testmod_struct_arg_5 h, long i)
+{
+ bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
+ f + g + h.a + h.b + h.c + h.d + i;
+ return bpf_testmod_test_struct_arg_result;
+}
+
+noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
bpf_testmod_test_struct_arg_result = a->a;
return bpf_testmod_test_struct_arg_result;
@@ -154,6 +171,42 @@ __bpf_kfunc void bpf_kfunc_common_test(void)
{
}
+__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
+ struct bpf_dynptr *ptr__nullable)
+{
+}
+
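+/*
+ * Refcounted context object for kfunc tests; the release kfunc drops a
+ * reference and frees the object via call_rcu() once the last reference is
+ * gone.
+ */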
+__bpf_kfunc struct bpf_testmod_ctx *
+bpf_testmod_ctx_create(int *err)
+{
+ struct bpf_testmod_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+ refcount_set(&ctx->usage, 1);
+
+ return ctx;
+}
+
+static void testmod_free_cb(struct rcu_head *head)
+{
+ struct bpf_testmod_ctx *ctx;
+
+ ctx = container_of(head, struct bpf_testmod_ctx, rcu);
+ kfree(ctx);
+}
+
+__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
+{
+ if (!ctx)
+ return;
+ if (refcount_dec_and_test(&ctx->usage))
+ call_rcu(&ctx->rcu, testmod_free_cb);
+}
+
struct bpf_testmod_btf_type_tag_1 {
int a;
};
@@ -269,6 +322,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
struct bpf_testmod_struct_arg_3 *struct_arg3;
struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
+ struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
int i = 1;
while (bpf_testmod_return_ptr(i))
@@ -283,6 +337,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
(void *)20, struct_arg4);
(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
(void *)20, struct_arg4, 23);
+ (void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
+ 21, 22, struct_arg5, 27);
(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
@@ -358,13 +414,133 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.write = bpf_testmod_test_write,
};
+/* The bpf_testmod_uprobe sysfs attribute is enabled for x86_64 only so far;
+ * see the test_uretprobe_regs_change test.
+ */
+#ifdef __x86_64__
+
+static int
+uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
+ struct pt_regs *regs)
+
+{
+ regs->ax = 0x12345678deadbeef;
+ regs->cx = 0x87654321feebdaed;
+ regs->r11 = (u64) -1;
+ return true;
+}
+
+struct testmod_uprobe {
+ struct path path;
+ loff_t offset;
+ struct uprobe_consumer consumer;
+};
+
+static DEFINE_MUTEX(testmod_uprobe_mutex);
+
+static struct testmod_uprobe uprobe = {
+ .consumer.ret_handler = uprobe_ret_handler,
+};
+
+static int testmod_register_uprobe(loff_t offset)
+{
+ int err = -EBUSY;
+
+ if (uprobe.offset)
+ return -EBUSY;
+
+ mutex_lock(&testmod_uprobe_mutex);
+
+ if (uprobe.offset)
+ goto out;
+
+ err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
+ if (err)
+ goto out;
+
+ err = uprobe_register_refctr(d_real_inode(uprobe.path.dentry),
+ offset, 0, &uprobe.consumer);
+ if (err)
+ path_put(&uprobe.path);
+ else
+ uprobe.offset = offset;
+
+out:
+ mutex_unlock(&testmod_uprobe_mutex);
+ return err;
+}
+
+static void testmod_unregister_uprobe(void)
+{
+ mutex_lock(&testmod_uprobe_mutex);
+
+ if (uprobe.offset) {
+ uprobe_unregister(d_real_inode(uprobe.path.dentry),
+ uprobe.offset, &uprobe.consumer);
+ uprobe.offset = 0;
+ }
+
+ mutex_unlock(&testmod_uprobe_mutex);
+}
+
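+/*
+ * Writing a non-zero offset registers a return uprobe on /proc/self/exe at
+ * that offset; writing zero unregisters it again.
+ */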
+static ssize_t
+bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t len)
+{
+ unsigned long offset = 0;
+ int err = 0;
+
+ if (kstrtoul(buf, 0, &offset))
+ return -EINVAL;
+
+ if (offset)
+ err = testmod_register_uprobe(offset);
+ else
+ testmod_unregister_uprobe();
+
+ return err ?: strlen(buf);
+}
+
+static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
+ .attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
+ .write = bpf_testmod_uprobe_write,
+};
+
+static int register_bpf_testmod_uprobe(void)
+{
+ return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
+}
+
+static void unregister_bpf_testmod_uprobe(void)
+{
+ testmod_unregister_uprobe();
+ sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
+}
+
+#else
+static int register_bpf_testmod_uprobe(void)
+{
+ return 0;
+}
+
+static void unregister_bpf_testmod_uprobe(void) { }
+#endif
+
BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
+BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
+BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
+BTF_ID_LIST(bpf_testmod_dtor_ids)
+BTF_ID(struct, bpf_testmod_ctx)
+BTF_ID(func, bpf_testmod_ctx_release)
+
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_testmod_common_kfunc_ids,
@@ -820,7 +996,7 @@ static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
.is_valid_access = bpf_testmod_ops_is_valid_access,
};
-static int bpf_dummy_reg(void *kdata)
+static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
struct bpf_testmod_ops *ops = kdata;
@@ -835,7 +1011,7 @@ static int bpf_dummy_reg(void *kdata)
return 0;
}
-static void bpf_dummy_unreg(void *kdata)
+static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}
@@ -871,7 +1047,7 @@ struct bpf_struct_ops bpf_bpf_testmod_ops = {
.owner = THIS_MODULE,
};
-static int bpf_dummy_reg2(void *kdata)
+static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
struct bpf_testmod_ops2 *ops = kdata;
@@ -898,6 +1074,12 @@ extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
{
+ const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
+ {
+ .btf_id = bpf_testmod_dtor_ids[0],
+ .kfunc_btf_id = bpf_testmod_dtor_ids[1]
+ },
+ };
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
@@ -906,13 +1088,22 @@ static int bpf_testmod_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
+ ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
+ ARRAY_SIZE(bpf_testmod_dtors),
+ THIS_MODULE);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
return -EINVAL;
sock = NULL;
mutex_init(&sock_lock);
- return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+ ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+ if (ret < 0)
+ return ret;
+ ret = register_bpf_testmod_uprobe();
+ if (ret < 0)
+ return ret;
+ return 0;
}
static void bpf_testmod_exit(void)
@@ -927,6 +1118,7 @@ static void bpf_testmod_exit(void)
bpf_kfunc_close_sock();
sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+ unregister_bpf_testmod_uprobe();
}
module_init(bpf_testmod_init);
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
index b0d586a6751f..e587a79f2239 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
@@ -80,6 +80,11 @@ struct sendmsg_args {
int msglen;
};
+struct bpf_testmod_ctx {
+ struct callback_head rcu;
+ refcount_t usage;
+};
+
struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
@@ -134,4 +139,9 @@ int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) __ksym;
int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym;
int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym;
+void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nullable) __ksym;
+
+struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym;
+void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym;
+
#endif /* _BPF_TESTMOD_KFUNC_H */
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index eeabd798bc3a..4ca84c8d9116 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -58,9 +58,12 @@ CONFIG_MPLS=y
CONFIG_MPLS_IPTUNNEL=y
CONFIG_MPLS_ROUTING=y
CONFIG_MPTCP=y
+CONFIG_NET_ACT_SKBMOD=y
+CONFIG_NET_CLS=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_CLS_FLOWER=y
+CONFIG_NET_CLS_MATCHALL=y
CONFIG_NET_FOU=y
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_NET_IPGRE=y
@@ -80,8 +83,22 @@ CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETKIT=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_DEFRAG_IPV4=y
CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NF_FLOW_TABLE=y
+CONFIG_NF_FLOW_TABLE_INET=y
+CONFIG_NETFILTER_NETLINK=y
+CONFIG_NFT_FLOW_OFFLOAD=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
CONFIG_NF_NAT=y
CONFIG_RC_CORE=y
CONFIG_SECURITY=y
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 35250e6cde7f..e0cba4178e41 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -94,7 +94,8 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl
if (settimeo(fd, opts->timeout_ms))
goto error_close;
- if (opts->post_socket_cb && opts->post_socket_cb(fd, NULL)) {
+ if (opts->post_socket_cb &&
+ opts->post_socket_cb(fd, opts->cb_opts)) {
log_err("Failed to call post_socket_cb");
goto error_close;
}
@@ -105,7 +106,7 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl
}
if (type == SOCK_STREAM) {
- if (listen(fd, 1) < 0) {
+ if (listen(fd, opts->backlog ? MAX(opts->backlog, 0) : 1) < 0) {
log_err("Failed to listed on socket");
goto error_close;
}
@@ -118,22 +119,32 @@ error_close:
return -1;
}
-int start_server(int family, int type, const char *addr_str, __u16 port,
- int timeout_ms)
+int start_server_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts)
{
- struct network_helper_opts opts = {
- .timeout_ms = timeout_ms,
- };
struct sockaddr_storage addr;
socklen_t addrlen;
+ if (!opts)
+ opts = &default_opts;
+
if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
return -1;
- return __start_server(type, (struct sockaddr *)&addr, addrlen, &opts);
+ return __start_server(type, (struct sockaddr *)&addr, addrlen, opts);
}
-static int reuseport_cb(int fd, const struct post_socket_opts *opts)
+int start_server(int family, int type, const char *addr_str, __u16 port,
+ int timeout_ms)
+{
+ struct network_helper_opts opts = {
+ .timeout_ms = timeout_ms,
+ };
+
+ return start_server_str(family, type, addr_str, port, &opts);
+}
+
+static int reuseport_cb(int fd, void *opts)
{
int on = 1;
@@ -238,6 +249,34 @@ error_close:
return -1;
}
+int client_socket(int family, int type,
+ const struct network_helper_opts *opts)
+{
+ int fd;
+
+ if (!opts)
+ opts = &default_opts;
+
+ fd = socket(family, type, opts->proto);
+ if (fd < 0) {
+ log_err("Failed to create client socket");
+ return -1;
+ }
+
+ if (settimeo(fd, opts->timeout_ms))
+ goto error_close;
+
+ if (opts->post_socket_cb &&
+ opts->post_socket_cb(fd, opts->cb_opts))
+ goto error_close;
+
+ return fd;
+
+error_close:
+ save_errno_close(fd);
+ return -1;
+}
+
static int connect_fd_to_addr(int fd,
const struct sockaddr_storage *addr,
socklen_t addrlen, const bool must_fail)
@@ -273,15 +312,12 @@ int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t add
if (!opts)
opts = &default_opts;
- fd = socket(addr->ss_family, type, opts->proto);
+ fd = client_socket(addr->ss_family, type, opts);
if (fd < 0) {
log_err("Failed to create client socket");
return -1;
}
- if (settimeo(fd, opts->timeout_ms))
- goto error_close;
-
if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail))
goto error_close;
@@ -292,66 +328,21 @@ error_close:
return -1;
}
-int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
+int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts)
{
struct sockaddr_storage addr;
- struct sockaddr_in *addr_in;
- socklen_t addrlen, optlen;
- int fd, type, protocol;
+ socklen_t addrlen;
if (!opts)
opts = &default_opts;
- optlen = sizeof(type);
-
- if (opts->type) {
- type = opts->type;
- } else {
- if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
- log_err("getsockopt(SOL_TYPE)");
- return -1;
- }
- }
-
- if (opts->proto) {
- protocol = opts->proto;
- } else {
- if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) {
- log_err("getsockopt(SOL_PROTOCOL)");
- return -1;
- }
- }
-
addrlen = sizeof(addr);
if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
log_err("Failed to get server addr");
return -1;
}
- addr_in = (struct sockaddr_in *)&addr;
- fd = socket(addr_in->sin_family, type, protocol);
- if (fd < 0) {
- log_err("Failed to create client socket");
- return -1;
- }
-
- if (settimeo(fd, opts->timeout_ms))
- goto error_close;
-
- if (opts->cc && opts->cc[0] &&
- setsockopt(fd, SOL_TCP, TCP_CONGESTION, opts->cc,
- strlen(opts->cc) + 1))
- goto error_close;
-
- if (!opts->noconnect)
- if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail))
- goto error_close;
-
- return fd;
-
-error_close:
- save_errno_close(fd);
- return -1;
+ return connect_to_addr(type, &addr, addrlen, opts);
}
int connect_to_fd(int server_fd, int timeout_ms)
@@ -359,8 +350,23 @@ int connect_to_fd(int server_fd, int timeout_ms)
struct network_helper_opts opts = {
.timeout_ms = timeout_ms,
};
+ int type, protocol;
+ socklen_t optlen;
+
+ optlen = sizeof(type);
+ if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
+ log_err("getsockopt(SOL_TYPE)");
+ return -1;
+ }
+
+ optlen = sizeof(protocol);
+ if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) {
+ log_err("getsockopt(SOL_PROTOCOL)");
+ return -1;
+ }
+ opts.proto = protocol;
- return connect_to_fd_opts(server_fd, &opts);
+ return connect_to_fd_opts(server_fd, type, &opts);
}
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
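Callers of connect_to_fd_opts() now pass the socket type explicitly instead of having it probed from the server fd, and tests that only need an unconnected socket can use client_socket(). A hedged sketch of both call sites (the fds, family and options are placeholders):

    struct network_helper_opts opts = { .timeout_ms = 1000 };
    struct network_helper_opts raw_opts = { .proto = IPPROTO_RAW };
    int cli_fd, raw_fd;

    /* connected TCP client against an existing listening server_fd */
    cli_fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts);

    /* unconnected raw socket, e.g. for hand-crafted frames */
    raw_fd = client_socket(AF_INET, SOCK_RAW, &raw_opts);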
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index 883c7ea9d8d5..aac5b94d6379 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -21,16 +21,22 @@ typedef __u16 __sum16;
#define VIP_NUM 5
#define MAGIC_BYTES 123
-struct post_socket_opts {};
-
struct network_helper_opts {
- const char *cc;
int timeout_ms;
bool must_fail;
- bool noconnect;
- int type;
int proto;
- int (*post_socket_cb)(int fd, const struct post_socket_opts *opts);
+ /* +ve: Passed to listen() as-is.
+ * 0: Default when the test does not set a particular value
+ * during the struct init. It is changed to 1 before being
+ * passed to listen(). Most tests only have one ongoing
+ * connection.
+ * -ve: Changed to 0 before being passed to listen(). Useful
+ * to force syncookies without changing the "tcp_syncookies"
+ * sysctl from 1 to 2.
+ */
+ int backlog;
+ int (*post_socket_cb)(int fd, void *opts);
+ void *cb_opts;
};
/* ipv4 test vector */
@@ -50,6 +56,8 @@ struct ipv6_packet {
extern struct ipv6_packet pkt_v6;
int settimeo(int fd, int timeout_ms);
+int start_server_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts);
int start_server(int family, int type, const char *addr, __u16 port,
int timeout_ms);
int *start_reuseport_server(int family, int type, const char *addr_str,
@@ -58,10 +66,12 @@ int *start_reuseport_server(int family, int type, const char *addr_str,
int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t len,
const struct network_helper_opts *opts);
void free_fds(int *fds, unsigned int nr_close_fds);
+int client_socket(int family, int type,
+ const struct network_helper_opts *opts);
int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t len,
const struct network_helper_opts *opts);
int connect_to_fd(int server_fd, int timeout_ms);
-int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts);
+int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts);
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
int timeout_ms);
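On the server side, the reworked struct network_helper_opts lets a test pick the listen() backlog and run a per-socket callback before bind(). A sketch of a listener forced into syncookie mode, with an extra setsockopt() applied through the callback (the callback and its option are illustrative, not part of this patch):

    static int set_tos_cb(int fd, void *opts)
    {
        int tos = *(int *)opts;

        return setsockopt(fd, SOL_IP, IP_TOS, &tos, sizeof(tos));
    }

    ...
        int tos = 0x10;
        struct network_helper_opts opts = {
            .backlog        = -1,          /* passed to listen() as 0 => syncookies */
            .post_socket_cb = set_tos_cb,
            .cb_opts        = &tos,
        };
        int srv_fd = start_server_str(AF_INET6, SOCK_STREAM, "::1", 0, &opts);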
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
index 0807a48a58ee..26e7c06c6cb4 100644
--- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
@@ -146,6 +146,22 @@ static void test_xchg(struct arena_atomics *skel)
ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
}
+static void test_uaf(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.uaf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails");
+}
+
void test_arena_atomics(void)
{
struct arena_atomics *skel;
@@ -180,6 +196,8 @@ void test_arena_atomics(void)
test_cmpxchg(skel);
if (test__start_subtest("xchg"))
test_xchg(skel);
+ if (test__start_subtest("uaf"))
+ test_uaf(skel);
cleanup:
arena_atomics__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 4407ea428e77..070c52c312e5 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -451,7 +451,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
- attr.sample_freq = 1000;
+ attr.sample_freq = 10000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index b30ff6b3b81a..a4a1f93878d4 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -104,6 +104,7 @@ static void test_bpf_nf_ct(int mode)
ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple");
ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0");
+ ASSERT_EQ(skel->bss->test_einval_reserved_new, -EINVAL, "Test EINVAL for reserved in new struct not set to 0");
ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1");
ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ");
ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP");
@@ -122,6 +123,12 @@ static void test_bpf_nf_ct(int mode)
ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark");
ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting");
ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting");
+ ASSERT_EQ(skel->data->test_ct_zone_id_alloc_entry, 0, "Test for alloc new entry in specified ct zone");
+ ASSERT_EQ(skel->data->test_ct_zone_id_insert_entry, 0, "Test for insert new entry in specified ct zone");
+ ASSERT_EQ(skel->data->test_ct_zone_id_succ_lookup, 0, "Test for successful lookup in specified ct_zone");
+ ASSERT_EQ(skel->bss->test_ct_zone_dir_enoent_lookup, -ENOENT, "Test ENOENT for lookup with wrong ct zone dir");
+ ASSERT_EQ(skel->bss->test_ct_zone_id_enoent_lookup, -ENOENT, "Test ENOENT for lookup in wrong ct zone");
+
end:
if (client_fd != -1)
close(client_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 0aca02532794..63422f4f3896 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -23,6 +23,11 @@
static const unsigned int total_bytes = 10 * 1024 * 1024;
static int expected_stg = 0xeB9F;
+struct cb_opts {
+ const char *cc;
+ int map_fd;
+};
+
static int settcpca(int fd, const char *tcp_ca)
{
int err;
@@ -34,55 +39,66 @@ static int settcpca(int fd, const char *tcp_ca)
return 0;
}
-static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
+static bool start_test(char *addr_str,
+ const struct network_helper_opts *srv_opts,
+ const struct network_helper_opts *cli_opts,
+ int *srv_fd, int *cli_fd)
{
- int lfd = -1, fd = -1;
- int err;
+ *srv_fd = start_server_str(AF_INET6, SOCK_STREAM, addr_str, 0, srv_opts);
+ if (!ASSERT_NEQ(*srv_fd, -1, "start_server_str"))
+ goto err;
- lfd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
- if (!ASSERT_NEQ(lfd, -1, "socket"))
- return;
-
- fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (!ASSERT_NEQ(fd, -1, "socket")) {
- close(lfd);
- return;
- }
+ /* connect to server */
+ *cli_fd = connect_to_fd_opts(*srv_fd, SOCK_STREAM, cli_opts);
+ if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts"))
+ goto err;
- if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca))
- goto done;
+ return true;
- if (sk_stg_map) {
- err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
- &expected_stg, BPF_NOEXIST);
- if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
- goto done;
+err:
+ if (*srv_fd != -1) {
+ close(*srv_fd);
+ *srv_fd = -1;
}
+ if (*cli_fd != -1) {
+ close(*cli_fd);
+ *cli_fd = -1;
+ }
+ return false;
+}
- /* connect to server */
- err = connect_fd_to_fd(fd, lfd, 0);
- if (!ASSERT_NEQ(err, -1, "connect"))
- goto done;
-
- if (sk_stg_map) {
- int tmp_stg;
+static void do_test(const struct network_helper_opts *opts)
+{
+ int lfd = -1, fd = -1;
- err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
- &tmp_stg);
- if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
- !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
- goto done;
- }
+ if (!start_test(NULL, opts, opts, &lfd, &fd))
+ goto done;
ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
done:
- close(lfd);
- close(fd);
+ if (lfd != -1)
+ close(lfd);
+ if (fd != -1)
+ close(fd);
+}
+
+static int cc_cb(int fd, void *opts)
+{
+ struct cb_opts *cb_opts = (struct cb_opts *)opts;
+
+ return settcpca(fd, cb_opts->cc);
}
static void test_cubic(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_cubic",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct bpf_cubic *cubic_skel;
struct bpf_link *link;
@@ -96,7 +112,7 @@ static void test_cubic(void)
return;
}
- do_test("bpf_cubic", NULL);
+ do_test(&opts);
ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");
@@ -104,8 +120,37 @@ static void test_cubic(void)
bpf_cubic__destroy(cubic_skel);
}
+static int stg_post_socket_cb(int fd, void *opts)
+{
+ struct cb_opts *cb_opts = (struct cb_opts *)opts;
+ int err;
+
+ err = settcpca(fd, cb_opts->cc);
+ if (err)
+ return err;
+
+ err = bpf_map_update_elem(cb_opts->map_fd, &fd,
+ &expected_stg, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
+ return err;
+
+ return 0;
+}
+
static void test_dctcp(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct network_helper_opts cli_opts = {
+ .post_socket_cb = stg_post_socket_cb,
+ .cb_opts = &cb_opts,
+ };
+ int lfd = -1, fd = -1, tmp_stg, err;
struct bpf_dctcp *dctcp_skel;
struct bpf_link *link;
@@ -119,11 +164,58 @@ static void test_dctcp(void)
return;
}
- do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
+ cb_opts.map_fd = bpf_map__fd(dctcp_skel->maps.sk_stg_map);
+ if (!start_test(NULL, &opts, &cli_opts, &lfd, &fd))
+ goto done;
+
+ err = bpf_map_lookup_elem(cb_opts.map_fd, &fd, &tmp_stg);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
+ !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
+ goto done;
+
+ ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result");
+done:
bpf_link__destroy(link);
bpf_dctcp__destroy(dctcp_skel);
+ if (lfd != -1)
+ close(lfd);
+ if (fd != -1)
+ close(fd);
+}
+
+static void test_dctcp_autoattach_map(void)
+{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct bpf_dctcp *dctcp_skel;
+ struct bpf_link *link;
+
+ dctcp_skel = bpf_dctcp__open_and_load();
+ if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load"))
+ return;
+
+ bpf_map__set_autoattach(dctcp_skel->maps.dctcp, true);
+ bpf_map__set_autoattach(dctcp_skel->maps.dctcp_nouse, false);
+
+ if (!ASSERT_OK(bpf_dctcp__attach(dctcp_skel), "bpf_dctcp__attach"))
+ goto destroy;
+
+ /* struct_ops is auto-attached */
+ link = dctcp_skel->links.dctcp;
+ if (!ASSERT_OK_PTR(link, "link"))
+ goto destroy;
+
+ do_test(&opts);
+
+destroy:
+ bpf_dctcp__destroy(dctcp_skel);
}
static char *err_str;
@@ -171,11 +263,22 @@ static void test_invalid_license(void)
static void test_dctcp_fallback(void)
{
int err, lfd = -1, cli_fd = -1, srv_fd = -1;
- struct network_helper_opts opts = {
- .cc = "cubic",
- };
struct bpf_dctcp *dctcp_skel;
struct bpf_link *link = NULL;
+ struct cb_opts dctcp = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts srv_opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &dctcp,
+ };
+ struct cb_opts cubic = {
+ .cc = "cubic",
+ };
+ struct network_helper_opts cli_opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cubic,
+ };
char srv_cc[16];
socklen_t cc_len = sizeof(srv_cc);
@@ -190,13 +293,7 @@ static void test_dctcp_fallback(void)
if (!ASSERT_OK_PTR(link, "dctcp link"))
goto done;
- lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
- if (!ASSERT_GE(lfd, 0, "lfd") ||
- !ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp"))
- goto done;
-
- cli_fd = connect_to_fd_opts(lfd, &opts);
- if (!ASSERT_GE(cli_fd, 0, "cli_fd"))
+ if (!start_test("::1", &srv_opts, &cli_opts, &lfd, &cli_fd))
goto done;
srv_fd = accept(lfd, NULL, 0);
@@ -297,6 +394,13 @@ static void test_unsupp_cong_op(void)
static void test_update_ca(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct tcp_ca_update *skel;
struct bpf_link *link;
int saved_ca1_cnt;
@@ -307,25 +411,34 @@ static void test_update_ca(void)
return;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
- ASSERT_OK_PTR(link, "attach_struct_ops");
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto out;
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
saved_ca1_cnt = skel->bss->ca1_cnt;
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_update_2);
ASSERT_OK(err, "update_map");
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");
bpf_link__destroy(link);
+out:
tcp_ca_update__destroy(skel);
}
static void test_update_wrong(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct tcp_ca_update *skel;
struct bpf_link *link;
int saved_ca1_cnt;
@@ -336,24 +449,33 @@ static void test_update_wrong(void)
return;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
- ASSERT_OK_PTR(link, "attach_struct_ops");
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto out;
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
saved_ca1_cnt = skel->bss->ca1_cnt;
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_wrong);
ASSERT_ERR(err, "update_map");
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
bpf_link__destroy(link);
+out:
tcp_ca_update__destroy(skel);
}
static void test_mixed_links(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct tcp_ca_update *skel;
struct bpf_link *link, *link_nl;
int err;
@@ -363,12 +485,13 @@ static void test_mixed_links(void)
return;
link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
- ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl");
+ if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"))
+ goto out;
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
ASSERT_OK_PTR(link, "attach_struct_ops");
- do_test("tcp_ca_update", NULL);
+ do_test(&opts);
ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt");
err = bpf_link__update_map(link, skel->maps.ca_no_link);
@@ -376,6 +499,7 @@ static void test_mixed_links(void)
bpf_link__destroy(link);
bpf_link__destroy(link_nl);
+out:
tcp_ca_update__destroy(skel);
}
@@ -418,7 +542,8 @@ static void test_link_replace(void)
bpf_link__destroy(link);
link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
- ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd"))
+ goto out;
/* BPF_F_REPLACE with a wrong old map Fd. It should fail!
*
@@ -441,6 +566,7 @@ static void test_link_replace(void)
bpf_link__destroy(link);
+out:
tcp_ca_update__destroy(skel);
}
@@ -455,6 +581,13 @@ static void test_tcp_ca_kfunc(void)
static void test_cc_cubic(void)
{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_cc_cubic",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
struct bpf_cc_cubic *cc_cubic_skel;
struct bpf_link *link;
@@ -468,7 +601,7 @@ static void test_cc_cubic(void)
return;
}
- do_test("bpf_cc_cubic", NULL);
+ do_test(&opts);
bpf_link__destroy(link);
bpf_cc_cubic__destroy(cc_cubic_skel);
@@ -506,4 +639,6 @@ void test_bpf_tcp_ca(void)
test_tcp_ca_kfunc();
if (test__start_subtest("cc_cubic"))
test_cc_cubic();
+ if (test__start_subtest("dctcp_autoattach_map"))
+ test_dctcp_autoattach_map();
}
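Every subtest above follows the same shape: put the CC name in struct cb_opts, point post_socket_cb at cc_cb, and hand the opts to start_test() for the listener and the client. A hypothetical additional subtest would reuse that skeleton unchanged (the CC name below is made up):

    struct cb_opts cb_opts = { .cc = "bpf_new_ca" };
    struct network_helper_opts opts = {
        .post_socket_cb = cc_cb,
        .cb_opts        = &cb_opts,
    };
    int lfd = -1, fd = -1;

    if (start_test(NULL, &opts, &opts, &lfd, &fd))
        ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
    if (lfd != -1)
        close(lfd);
    if (fd != -1)
        close(fd);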
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index 4c6ada5b270b..73f669014b69 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -45,12 +45,6 @@ err_out:
return err;
}
-struct scale_test_def {
- const char *file;
- enum bpf_prog_type attach_type;
- bool fails;
-};
-
static void scale_test(const char *file,
enum bpf_prog_type attach_type,
bool should_fail)
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
new file mode 100644
index 000000000000..bfbe795823a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "btf_helpers.h"
+
+/* Fabricate base and split BTF with references to the base types needed; then
+ * distill the base BTF and ensure expectations are met:
+ * - only base types referenced from split BTF are present
+ * - named struct/union/enum types are represented as empty; anonymous ones
+ * are copied in full into split BTF
+ */
+static void test_distilled_base(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_ptr(btf1, 1); /* [2] ptr to int */
+ btf__add_struct(btf1, "s1", 8); /* [3] struct s1 { */
+ btf__add_field(btf1, "f1", 2, 0, 0); /* int *f1; */
+ /* } */
+ btf__add_struct(btf1, "", 12); /* [4] struct { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf1, "f2", 3, 32, 0); /* struct s1 f2; */
+ /* } */
+ btf__add_int(btf1, "unsigned int", 4, 0); /* [5] unsigned int */
+ btf__add_union(btf1, "u1", 12); /* [6] union u1 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf1, "f2", 2, 0, 0); /* int *f2; */
+ /* } */
+ btf__add_union(btf1, "", 4); /* [7] union { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_enum(btf1, "e1", 4); /* [8] enum e1 { */
+ btf__add_enum_value(btf1, "v1", 1); /* v1 = 1; */
+ /* } */
+ btf__add_enum(btf1, "", 4); /* [9] enum { */
+ btf__add_enum_value(btf1, "av1", 2); /* av1 = 2; */
+ /* } */
+ btf__add_enum64(btf1, "e641", 8, true); /* [10] enum64 { */
+ btf__add_enum64_value(btf1, "v1", 1024); /* v1 = 1024; */
+ /* } */
+ btf__add_enum64(btf1, "", 8, true); /* [11] enum64 { */
+ btf__add_enum64_value(btf1, "v1", 1025); /* v1 = 1025; */
+ /* } */
+ btf__add_struct(btf1, "unneeded", 4); /* [12] struct unneeded { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_struct(btf1, "embedded", 4); /* [13] struct embedded { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_func_proto(btf1, 1); /* [14] int (*)(int *p1); */
+ btf__add_func_param(btf1, "p1", 1);
+
+ btf__add_array(btf1, 1, 1, 3); /* [15] int [3]; */
+
+ btf__add_struct(btf1, "from_proto", 4); /* [16] struct from_proto { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_union(btf1, "u1", 4); /* [17] union u1 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0");
+
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+
+ btf__add_ptr(btf2, 3); /* [18] ptr to struct s1 */
+ /* add ptr to struct anon */
+ btf__add_ptr(btf2, 4); /* [19] ptr to struct (anon) */
+ btf__add_const(btf2, 6); /* [20] const union u1 */
+ btf__add_restrict(btf2, 7); /* [21] restrict union (anon) */
+ btf__add_volatile(btf2, 8); /* [22] volatile enum e1 */
+ btf__add_typedef(btf2, "et", 9); /* [23] typedef enum (anon) */
+ btf__add_const(btf2, 10); /* [24] const enum64 e641 */
+ btf__add_ptr(btf2, 11); /* [25] restrict enum64 (anon) */
+ btf__add_struct(btf2, "with_embedded", 4); /* [26] struct with_embedded { */
+ btf__add_field(btf2, "f1", 13, 0, 0); /* struct embedded f1; */
+ /* } */
+ btf__add_func(btf2, "fn", BTF_FUNC_STATIC, 14); /* [27] int fn(int p1); */
+ btf__add_typedef(btf2, "arraytype", 15); /* [28] typedef int[3] foo; */
+ btf__add_func_proto(btf2, 1); /* [29] int (*)(struct from_proto p1); */
+ btf__add_func_param(btf2, "p1", 16);
+
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[18] PTR '(anon)' type_id=3",
+ "[19] PTR '(anon)' type_id=4",
+ "[20] CONST '(anon)' type_id=6",
+ "[21] RESTRICT '(anon)' type_id=7",
+ "[22] VOLATILE '(anon)' type_id=8",
+ "[23] TYPEDEF 'et' type_id=9",
+ "[24] CONST '(anon)' type_id=10",
+ "[25] PTR '(anon)' type_id=11",
+ "[26] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=13 bits_offset=0",
+ "[27] FUNC 'fn' type_id=14 linkage=static",
+ "[28] TYPEDEF 'arraytype' type_id=15",
+ "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=16");
+
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(8, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's1' size=8 vlen=0",
+ "[3] UNION 'u1' size=12 vlen=0",
+ "[4] ENUM 'e1' encoding=UNSIGNED size=4 vlen=0",
+ "[5] ENUM 'e641' encoding=UNSIGNED size=8 vlen=0",
+ "[6] STRUCT 'embedded' size=4 vlen=0",
+ "[7] STRUCT 'from_proto' size=4 vlen=0",
+ /* split BTF; these types should match split BTF above from 17-28, with
+ * updated type id references
+ */
+ "[8] PTR '(anon)' type_id=2",
+ "[9] PTR '(anon)' type_id=20",
+ "[10] CONST '(anon)' type_id=3",
+ "[11] RESTRICT '(anon)' type_id=21",
+ "[12] VOLATILE '(anon)' type_id=4",
+ "[13] TYPEDEF 'et' type_id=22",
+ "[14] CONST '(anon)' type_id=5",
+ "[15] PTR '(anon)' type_id=23",
+ "[16] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=6 bits_offset=0",
+ "[17] FUNC 'fn' type_id=24 linkage=static",
+ "[18] TYPEDEF 'arraytype' type_id=25",
+ "[19] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=7",
+ /* split BTF types added from original base BTF below */
+ "[20] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=32",
+ "[21] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[22] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[23] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[24] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[25] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3");
+
+ if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[18] PTR '(anon)' type_id=3",
+ "[19] PTR '(anon)' type_id=30",
+ "[20] CONST '(anon)' type_id=6",
+ "[21] RESTRICT '(anon)' type_id=31",
+ "[22] VOLATILE '(anon)' type_id=8",
+ "[23] TYPEDEF 'et' type_id=32",
+ "[24] CONST '(anon)' type_id=10",
+ "[25] PTR '(anon)' type_id=33",
+ "[26] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=13 bits_offset=0",
+ "[27] FUNC 'fn' type_id=34 linkage=static",
+ "[28] TYPEDEF 'arraytype' type_id=35",
+ "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=16",
+ /* below here are (duplicate) anon base types added by distill
+ * process to split BTF.
+ */
+ "[30] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[31] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[32] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[33] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[34] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[35] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3");
+
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* ensure we can cope with multiple types with the same name in
+ * distilled base BTF. In this case because sizes are different,
+ * we can still disambiguate them.
+ */
+static void test_distilled_base_multi(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* If a needed type is not present in the base BTF we wish to relocate
+ * with, btf__relocate() should error out.
+ */
+static void test_distilled_base_missing_err(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf5 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+ goto cleanup;
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ VALIDATE_RAW_BTF(
+ btf5,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split");
+
+cleanup:
+ btf__free(btf5);
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* With two types of the same size in distilled base BTF, relocation should
+ * fail as we have no means to choose between them.
+ */
+static void test_distilled_base_multi_err(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf1), -EINVAL, "relocate_split");
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* With two types of the same size in base BTF, relocation should
+ * fail as we have no means to choose between them.
+ */
+static void test_distilled_base_multi_err2(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf5 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+ goto cleanup;
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf5,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split");
+cleanup:
+ btf__free(btf5);
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* create split reference BTF from vmlinux + split BTF with a few type references;
+ * ensure the resultant split reference BTF is as expected, containing only types
+ * needed to disambiguate references from split BTF.
+ */
+static void test_distilled_base_vmlinux(void)
+{
+ struct btf *split_btf = NULL, *vmlinux_btf = btf__load_vmlinux_btf();
+ struct btf *split_dist = NULL, *base_dist = NULL;
+ __s32 int_id, myint_id;
+
+ if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux"))
+ return;
+ int_id = btf__find_by_name_kind(vmlinux_btf, "int", BTF_KIND_INT);
+ if (!ASSERT_GT(int_id, 0, "find_int"))
+ goto cleanup;
+ split_btf = btf__new_empty_split(vmlinux_btf);
+ if (!ASSERT_OK_PTR(split_btf, "new_split"))
+ goto cleanup;
+ myint_id = btf__add_typedef(split_btf, "myint", int_id);
+ btf__add_ptr(split_btf, myint_id);
+
+ if (!ASSERT_EQ(btf__distill_base(split_btf, &base_dist, &split_dist), 0,
+ "distill_vmlinux_base"))
+ goto cleanup;
+
+ if (!ASSERT_OK_PTR(split_dist, "split_distilled") ||
+ !ASSERT_OK_PTR(base_dist, "base_dist"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ split_dist,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] TYPEDEF 'myint' type_id=1",
+ "[3] PTR '(anon)' type_id=2");
+
+cleanup:
+ btf__free(split_dist);
+ btf__free(base_dist);
+ btf__free(split_btf);
+ btf__free(vmlinux_btf);
+}
+
+void test_btf_distill(void)
+{
+ if (test__start_subtest("distilled_base"))
+ test_distilled_base();
+ if (test__start_subtest("distilled_base_multi"))
+ test_distilled_base_multi();
+ if (test__start_subtest("distilled_base_missing_err"))
+ test_distilled_base_missing_err();
+ if (test__start_subtest("distilled_base_multi_err"))
+ test_distilled_base_multi_err();
+ if (test__start_subtest("distilled_base_multi_err2"))
+ test_distilled_base_multi_err2();
+ if (test__start_subtest("distilled_base_vmlinux"))
+ test_distilled_base_vmlinux();
+}
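The distilled-base workflow exercised by these subtests boils down to two libbpf calls: btf__distill_base() when the split BTF is produced, and btf__relocate() once the (possibly different) base BTF is available again. A condensed sketch of that flow, with error handling and I/O elided and the BTF handles treated as placeholders:

    struct btf *base_dist = NULL, *split_dist = NULL;

    /* build time: shrink the base that split_btf references */
    if (btf__distill_base(split_btf, &base_dist, &split_dist))
        goto err;
    /* ... persist base_dist and split_dist, e.g. alongside a module ... */

    /* load time: rebase split_dist onto the running kernel's BTF */
    if (btf__relocate(split_dist, vmlinux_btf))
        goto err;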
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c b/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c
new file mode 100644
index 000000000000..32159d3eb281
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "btf_helpers.h"
+#include "bpf/libbpf_internal.h"
+
+struct field_data {
+ __u32 ids[5];
+ const char *strs[5];
+} fields[] = {
+ { .ids = {}, .strs = {} },
+ { .ids = {}, .strs = { "int" } },
+ { .ids = {}, .strs = { "int64" } },
+ { .ids = { 1 }, .strs = { "" } },
+ { .ids = { 2, 1 }, .strs = { "" } },
+ { .ids = { 3, 1 }, .strs = { "s1", "f1", "f2" } },
+ { .ids = { 1, 5 }, .strs = { "u1", "f1", "f2" } },
+ { .ids = {}, .strs = { "e1", "v1", "v2" } },
+ { .ids = {}, .strs = { "fw1" } },
+ { .ids = { 1 }, .strs = { "t" } },
+ { .ids = { 2 }, .strs = { "" } },
+ { .ids = { 1 }, .strs = { "" } },
+ { .ids = { 3 }, .strs = { "" } },
+ { .ids = { 1, 1, 3 }, .strs = { "", "p1", "p2" } },
+ { .ids = { 13 }, .strs = { "func" } },
+ { .ids = { 1 }, .strs = { "var1" } },
+ { .ids = { 3 }, .strs = { "var2" } },
+ { .ids = {}, .strs = { "float" } },
+ { .ids = { 11 }, .strs = { "decltag" } },
+ { .ids = { 6 }, .strs = { "typetag" } },
+ { .ids = {}, .strs = { "e64", "eval1", "eval2", "eval3" } },
+ { .ids = { 15, 16 }, .strs = { "datasec1" } }
+
+};
+
+/* Fabricate BTF with various types and check that BTF field iteration finds
+ * the expected type ids and strings.
+ */
+void test_btf_field_iter(void)
+{
+ struct btf *btf = NULL;
+ int id;
+
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ return;
+
+ btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf, "int64", 8, BTF_INT_SIGNED); /* [2] int64 */
+ btf__add_ptr(btf, 1); /* [3] int * */
+ btf__add_array(btf, 1, 2, 3); /* [4] int64[3] */
+ btf__add_struct(btf, "s1", 12); /* [5] struct s1 { */
+ btf__add_field(btf, "f1", 3, 0, 0); /* int *f1; */
+ btf__add_field(btf, "f2", 1, 0, 0); /* int f2; */
+ /* } */
+ btf__add_union(btf, "u1", 12); /* [6] union u1 { */
+ btf__add_field(btf, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf, "f2", 5, 0, 0); /* struct s1 f2; */
+ /* } */
+ btf__add_enum(btf, "e1", 4); /* [7] enum e1 { */
+ btf__add_enum_value(btf, "v1", 1); /* v1 = 1; */
+ btf__add_enum_value(btf, "v2", 2); /* v2 = 2; */
+ /* } */
+
+ btf__add_fwd(btf, "fw1", BTF_FWD_STRUCT); /* [8] struct fw1; */
+ btf__add_typedef(btf, "t", 1); /* [9] typedef int t; */
+ btf__add_volatile(btf, 2); /* [10] volatile int64; */
+ btf__add_const(btf, 1); /* [11] const int; */
+ btf__add_restrict(btf, 3); /* [12] restrict int *; */
+ btf__add_func_proto(btf, 1); /* [13] int (*)(int p1, int *p2); */
+ btf__add_func_param(btf, "p1", 1);
+ btf__add_func_param(btf, "p2", 3);
+
+ btf__add_func(btf, "func", BTF_FUNC_GLOBAL, 13);/* [14] int func(int p1, int *p2); */
+ btf__add_var(btf, "var1", BTF_VAR_STATIC, 1); /* [15] static int var1; */
+ btf__add_var(btf, "var2", BTF_VAR_STATIC, 3); /* [16] static int *var2; */
+ btf__add_float(btf, "float", 4); /* [17] float; */
+ btf__add_decl_tag(btf, "decltag", 11, -1); /* [18] decltag const int; */
+ btf__add_type_tag(btf, "typetag", 6); /* [19] typetag union u1; */
+ btf__add_enum64(btf, "e64", 8, true); /* [20] enum { */
+ btf__add_enum64_value(btf, "eval1", 1000); /* eval1 = 1000, */
+ btf__add_enum64_value(btf, "eval2", 2000); /* eval2 = 2000, */
+ btf__add_enum64_value(btf, "eval3", 3000); /* eval3 = 3000 */
+ /* } */
+ btf__add_datasec(btf, "datasec1", 12); /* [21] datasec datasec1 */
+ btf__add_datasec_var_info(btf, 15, 0, 4);
+ btf__add_datasec_var_info(btf, 16, 4, 8);
+
+ VALIDATE_RAW_BTF(
+ btf,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int64' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=3",
+ "[5] STRUCT 's1' size=12 vlen=2\n"
+ "\t'f1' type_id=3 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=0",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=5 bits_offset=0",
+ "[7] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
+ "\t'v1' val=1\n"
+ "\t'v2' val=2",
+ "[8] FWD 'fw1' fwd_kind=struct",
+ "[9] TYPEDEF 't' type_id=1",
+ "[10] VOLATILE '(anon)' type_id=2",
+ "[11] CONST '(anon)' type_id=1",
+ "[12] RESTRICT '(anon)' type_id=3",
+ "[13] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
+ "\t'p1' type_id=1\n"
+ "\t'p2' type_id=3",
+ "[14] FUNC 'func' type_id=13 linkage=global",
+ "[15] VAR 'var1' type_id=1, linkage=static",
+ "[16] VAR 'var2' type_id=3, linkage=static",
+ "[17] FLOAT 'float' size=4",
+ "[18] DECL_TAG 'decltag' type_id=11 component_idx=-1",
+ "[19] TYPE_TAG 'typetag' type_id=6",
+ "[20] ENUM64 'e64' encoding=SIGNED size=8 vlen=3\n"
+ "\t'eval1' val=1000\n"
+ "\t'eval2' val=2000\n"
+ "\t'eval3' val=3000",
+ "[21] DATASEC 'datasec1' size=12 vlen=2\n"
+ "\ttype_id=15 offset=0 size=4\n"
+ "\ttype_id=16 offset=4 size=8");
+
+ for (id = 1; id < btf__type_cnt(btf); id++) {
+ struct btf_type *t = btf_type_by_id(btf, id);
+ struct btf_field_iter it_strs, it_ids;
+ int str_idx = 0, id_idx = 0;
+ __u32 *next_str, *next_id;
+
+ if (!ASSERT_OK_PTR(t, "btf_type_by_id"))
+ break;
+ if (!ASSERT_OK(btf_field_iter_init(&it_strs, t, BTF_FIELD_ITER_STRS),
+ "iter_init_strs"))
+ break;
+ if (!ASSERT_OK(btf_field_iter_init(&it_ids, t, BTF_FIELD_ITER_IDS),
+ "iter_init_ids"))
+ break;
+ while ((next_str = btf_field_iter_next(&it_strs))) {
+ const char *str = btf__str_by_offset(btf, *next_str);
+
+ if (!ASSERT_OK(strcmp(fields[id].strs[str_idx], str), "field_str_match"))
+ break;
+ str_idx++;
+ }
+ /* ensure no more strings are expected */
+ ASSERT_EQ(fields[id].strs[str_idx], NULL, "field_str_cnt");
+
+ while ((next_id = btf_field_iter_next(&it_ids))) {
+ if (!ASSERT_EQ(*next_id, fields[id].ids[id_idx], "field_id_match"))
+ break;
+ id_idx++;
+ }
+ /* ensure no more ids are expected */
+ ASSERT_EQ(fields[id].ids[id_idx], 0, "field_id_cnt");
+ }
+ btf__free(btf);
+}
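btf_field_iter is a libbpf-internal helper for walking every string-offset or type-id slot of a BTF type; its typical consumer rewrites those slots in place (e.g. during linking or relocation). A hedged sketch of an offset-remapping loop over a type t; remap_str() is a hypothetical helper, not part of this patch:

    struct btf_field_iter it;
    __u32 *str_off;

    if (btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS))
        return -EINVAL;
    while ((str_off = btf_field_iter_next(&it)))
        *str_off = remap_str(*str_off);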
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
index addf720428f7..9709c8db7275 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
@@ -32,7 +32,7 @@ static int run_test(int cgroup_fd, int server_fd, bool classid)
goto out;
}
- fd = connect_to_fd_opts(server_fd, &opts);
+ fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts);
if (fd < 0)
err = -1;
else
@@ -52,7 +52,7 @@ void test_cgroup_v1v2(void)
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
if (!ASSERT_GE(server_fd, 0, "server_fd"))
return;
- client_fd = connect_to_fd_opts(server_fd, &opts);
+ client_fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts);
if (!ASSERT_GE(client_fd, 0, "client_fd")) {
close(server_fd);
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
index ecf89df78109..2570bd4b0cb2 100644
--- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -18,6 +18,11 @@ static const char * const cpumask_success_testcases[] = {
"test_insert_leave",
"test_insert_remove_release",
"test_global_mask_rcu",
+ "test_global_mask_array_one_rcu",
+ "test_global_mask_array_rcu",
+ "test_global_mask_array_l2_rcu",
+ "test_global_mask_nested_rcu",
+ "test_global_mask_nested_deep_rcu",
"test_cpumask_weight",
};
diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
index 3b7c57fe55a5..08b6391f2f56 100644
--- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
+++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
@@ -69,15 +69,17 @@ static struct test_case test_cases[] = {
{
N(SCHED_CLS, struct __sk_buff, tstamp),
.read = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
- "w11 &= 3;"
- "if w11 != 0x3 goto pc+2;"
+ "if w11 & 0x4 goto pc+1;"
+ "goto pc+4;"
+ "if w11 & 0x3 goto pc+1;"
+ "goto pc+2;"
"$dst = 0;"
"goto pc+1;"
"$dst = *(u64 *)($ctx + sk_buff::tstamp);",
.write = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
- "if w11 & 0x2 goto pc+1;"
+ "if w11 & 0x4 goto pc+1;"
"goto pc+2;"
- "w11 &= -2;"
+ "w11 &= -4;"
"*(u8 *)($ctx + sk_buff::__mono_tc_offset) = r11;"
"*(u64 *)($ctx + sk_buff::tstamp) = $src;",
},
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 596536def43d..49b1ffc9af1f 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -50,9 +50,9 @@ void serial_test_fexit_stress(void)
out:
for (i = 0; i < bpf_max_tramp_links; i++) {
- if (link_fd[i])
+ if (link_fd[i] > 0)
close(link_fd[i]);
- if (fexit_fd[i])
+ if (fexit_fd[i] > 0)
close(fexit_fd[i]);
}
free(fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c
index 5165b38f0e59..f7619e0ade10 100644
--- a/tools/testing/selftests/bpf/prog_tests/find_vma.c
+++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c
@@ -29,8 +29,8 @@ static int open_pe(void)
/* create perf event */
attr.size = sizeof(attr);
- attr.type = PERF_TYPE_HARDWARE;
- attr.config = PERF_COUNT_HW_CPU_CYCLES;
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
index 284764e7179f..4ddb8a5fece8 100644
--- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
+++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
@@ -158,15 +158,13 @@ static int send_frags6(int client)
void test_bpf_ip_check_defrag_ok(bool ipv6)
{
+ int family = ipv6 ? AF_INET6 : AF_INET;
struct network_helper_opts rx_opts = {
.timeout_ms = 1000,
- .noconnect = true,
};
struct network_helper_opts tx_ops = {
.timeout_ms = 1000,
- .type = SOCK_RAW,
.proto = IPPROTO_RAW,
- .noconnect = true,
};
struct sockaddr_storage caddr;
struct ip_check_defrag *skel;
@@ -192,7 +190,7 @@ void test_bpf_ip_check_defrag_ok(bool ipv6)
nstoken = open_netns(NS1);
if (!ASSERT_OK_PTR(nstoken, "setns ns1"))
goto out;
- srv_fd = start_server(ipv6 ? AF_INET6 : AF_INET, SOCK_DGRAM, NULL, SERVER_PORT, 0);
+ srv_fd = start_server(family, SOCK_DGRAM, NULL, SERVER_PORT, 0);
close_netns(nstoken);
if (!ASSERT_GE(srv_fd, 0, "start_server"))
goto out;
@@ -201,18 +199,18 @@ void test_bpf_ip_check_defrag_ok(bool ipv6)
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
goto out;
- client_tx_fd = connect_to_fd_opts(srv_fd, &tx_ops);
+ client_tx_fd = client_socket(family, SOCK_RAW, &tx_ops);
close_netns(nstoken);
- if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts"))
+ if (!ASSERT_GE(client_tx_fd, 0, "client_socket"))
goto out;
/* Open rx socket in ns0 */
nstoken = open_netns(NS0);
if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
goto out;
- client_rx_fd = connect_to_fd_opts(srv_fd, &rx_opts);
+ client_rx_fd = client_socket(family, SOCK_DGRAM, &rx_opts);
close_netns(nstoken);
- if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts"))
+ if (!ASSERT_GE(client_rx_fd, 0, "client_socket"))
goto out;
/* Bind rx socket to a premeditated port */
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index 2eb71559713c..5b743212292f 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -78,6 +78,7 @@ static struct kfunc_test_params kfunc_tests[] = {
SYSCALL_TEST(kfunc_syscall_test, 0),
SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
TC_TEST(kfunc_call_test_static_unused_arg, 0),
+ TC_TEST(kfunc_call_ctx, 0),
};
struct syscall_test_args {
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c
new file mode 100644
index 000000000000..c8f4dcaac7c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2024 Meta Platforms, Inc */
+
+#include <test_progs.h>
+#include "test_kfunc_param_nullable.skel.h"
+
+void test_kfunc_param_nullable(void)
+{
+ RUN_TESTS(test_kfunc_param_nullable);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 2fb89de63bd2..77d07e0a4a55 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -183,6 +183,18 @@ static void test_linked_list_success(int mode, bool leave_in_map)
if (!leave_in_map)
clear_fields(skel->maps.bss_A);
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts);
+ ASSERT_OK(ret, "global_list_push_pop_nested");
+ ASSERT_OK(opts.retval, "global_list_push_pop_nested retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts);
+ ASSERT_OK(ret, "global_list_array_push_pop");
+ ASSERT_OK(opts.retval, "global_list_array_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
if (mode == PUSH_POP)
goto end;
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index 274d2e033e39..d2ca32fa3b21 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -89,13 +89,8 @@ static int start_mptcp_server(int family, const char *addr_str, __u16 port,
.timeout_ms = timeout_ms,
.proto = IPPROTO_MPTCP,
};
- struct sockaddr_storage addr;
- socklen_t addrlen;
- if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
- return -1;
-
- return start_server_addr(SOCK_STREAM, &addr, addrlen, &opts);
+ return start_server_str(family, SOCK_STREAM, addr_str, port, &opts);
}
static int verify_tsk(int map_fd, int client_fd)
diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c
index e9300c96607d..9818f06c97c5 100644
--- a/tools/testing/selftests/bpf/prog_tests/rbtree.c
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c
@@ -31,6 +31,28 @@ static void test_rbtree_add_nodes(void)
rbtree__destroy(skel);
}
+static void test_rbtree_add_nodes_nested(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts);
+ ASSERT_OK(ret, "rbtree_add_nodes_nested run");
+ ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval");
+ ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran");
+
+ rbtree__destroy(skel);
+}
+
static void test_rbtree_add_and_remove(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -53,6 +75,27 @@ static void test_rbtree_add_and_remove(void)
rbtree__destroy(skel);
}
+static void test_rbtree_add_and_remove_array(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts);
+ ASSERT_OK(ret, "rbtree_add_and_remove_array");
+ ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval");
+
+ rbtree__destroy(skel);
+}
+
static void test_rbtree_first_and_remove(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -104,8 +147,12 @@ void test_rbtree_success(void)
{
if (test__start_subtest("rbtree_add_nodes"))
test_rbtree_add_nodes();
+ if (test__start_subtest("rbtree_add_nodes_nested"))
+ test_rbtree_add_nodes_nested();
if (test__start_subtest("rbtree_add_and_remove"))
test_rbtree_add_and_remove();
+ if (test__start_subtest("rbtree_add_and_remove_array"))
+ test_rbtree_add_and_remove_array();
if (test__start_subtest("rbtree_first_and_remove"))
test_rbtree_first_and_remove();
if (test__start_subtest("rbtree_api_release_aliasing"))
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 920aee41bd58..6cc69900b310 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -156,7 +156,8 @@ static void test_send_signal_tracepoint(bool signal_thread)
static void test_send_signal_perf(bool signal_thread)
{
struct perf_event_attr attr = {
- .sample_period = 1,
+ .freq = 1,
+ .sample_freq = 1000,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
index 597d0467a926..ae87c00867ba 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
@@ -77,6 +77,12 @@ struct test {
bool reuseport_has_conns; /* Add a connected socket to reuseport group */
};
+struct cb_opts {
+ int family;
+ int sotype;
+ bool reuseport;
+};
+
static __u32 duration; /* for CHECK macro */
static bool is_ipv6(const char *ip)
@@ -142,19 +148,14 @@ static int make_socket(int sotype, const char *ip, int port,
return fd;
}
-static int make_server(int sotype, const char *ip, int port,
- struct bpf_program *reuseport_prog)
+static int setsockopts(int fd, void *opts)
{
- struct sockaddr_storage addr = {0};
+ struct cb_opts *co = (struct cb_opts *)opts;
const int one = 1;
- int err, fd = -1;
-
- fd = make_socket(sotype, ip, port, &addr);
- if (fd < 0)
- return -1;
+ int err = 0;
/* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */
- if (sotype == SOCK_DGRAM) {
+ if (co->sotype == SOCK_DGRAM) {
err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) {
@@ -163,7 +164,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) {
+ if (co->sotype == SOCK_DGRAM && co->family == AF_INET6) {
err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) {
@@ -172,7 +173,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (sotype == SOCK_STREAM) {
+ if (co->sotype == SOCK_STREAM) {
err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) {
@@ -181,7 +182,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (reuseport_prog) {
+ if (co->reuseport) {
err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one,
sizeof(one));
if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) {
@@ -190,19 +191,28 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- err = bind(fd, (void *)&addr, inetaddr_len(&addr));
- if (CHECK(err, "bind", "failed\n")) {
- log_err("failed to bind listen socket");
- goto fail;
- }
+fail:
+ return err;
+}
- if (sotype == SOCK_STREAM) {
- err = listen(fd, SOMAXCONN);
- if (CHECK(err, "make_server", "listen")) {
- log_err("failed to listen on port %d", port);
- goto fail;
- }
- }
+static int make_server(int sotype, const char *ip, int port,
+ struct bpf_program *reuseport_prog)
+{
+ struct cb_opts cb_opts = {
+ .family = is_ipv6(ip) ? AF_INET6 : AF_INET,
+ .sotype = sotype,
+ .reuseport = reuseport_prog,
+ };
+ struct network_helper_opts opts = {
+ .backlog = SOMAXCONN,
+ .post_socket_cb = setsockopts,
+ .cb_opts = &cb_opts,
+ };
+ int err, fd;
+
+ fd = start_server_str(cb_opts.family, sotype, ip, port, &opts);
+ if (!ASSERT_OK_FD(fd, "start_server_str"))
+ return -1;
/* Late attach reuseport prog so we can have one init path */
if (reuseport_prog) {
@@ -406,18 +416,12 @@ static int udp_recv_send(int server_fd)
}
/* Reply from original destination address. */
- fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0);
- if (CHECK(fd < 0, "socket", "failed\n")) {
+ fd = start_server_addr(SOCK_DGRAM, dst_addr, sizeof(*dst_addr), NULL);
+ if (!ASSERT_OK_FD(fd, "start_server_addr")) {
log_err("failed to create tx socket");
return -1;
}
- ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr));
- if (CHECK(ret, "bind", "failed\n")) {
- log_err("failed to bind tx socket");
- goto out;
- }
-
msg.msg_control = NULL;
msg.msg_controllen = 0;
n = sendmsg(fd, &msg, 0);
@@ -629,9 +633,6 @@ static void run_lookup_prog(const struct test *t)
* BPF socket lookup.
*/
if (t->reuseport_has_conns) {
- struct sockaddr_storage addr = {};
- socklen_t len = sizeof(addr);
-
/* Add an extra socket to reuseport group */
reuse_conn_fd = make_server(t->sotype, t->listen_at.ip,
t->listen_at.port,
@@ -639,12 +640,9 @@ static void run_lookup_prog(const struct test *t)
if (reuse_conn_fd < 0)
goto close;
- /* Connect the extra socket to itself */
- err = getsockname(reuse_conn_fd, (void *)&addr, &len);
- if (CHECK(err, "getsockname", "errno %d\n", errno))
- goto close;
- err = connect(reuse_conn_fd, (void *)&addr, len);
- if (CHECK(err, "connect", "errno %d\n", errno))
+ /* Connect the extra socket to itself */
+ err = connect_fd_to_fd(reuse_conn_fd, reuse_conn_fd, 0);
+ if (!ASSERT_OK(err, "connect_fd_to_fd"))
goto close;
}
@@ -994,7 +992,7 @@ static void drop_on_reuseport(const struct test *t)
err = update_lookup_map(t->sock_map, SERVER_A, server1);
if (err)
- goto detach;
+ goto close_srv1;
/* second server on destination address we should never reach */
server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port,
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
index 1d3a20f01b60..7cd8be2780ca 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
@@ -70,7 +70,7 @@ static void *server_thread(void *arg)
return (void *)(long)err;
}
-static int custom_cb(int fd, const struct post_socket_opts *opts)
+static int custom_cb(int fd, void *opts)
{
char buf;
int err;
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c
index bc9841144685..1af9ec1149aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_links.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c
@@ -9,6 +9,8 @@
#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
#include "test_tc_link.skel.h"
+
+#include "netlink_helpers.h"
#include "tc_helpers.h"
void serial_test_tc_links_basic(void)
@@ -1787,6 +1789,65 @@ void serial_test_tc_links_ingress(void)
test_tc_links_ingress(BPF_TCX_INGRESS, false, false);
}
+struct qdisc_req {
+ struct nlmsghdr n;
+ struct tcmsg t;
+ char buf[1024];
+};
+
+static int qdisc_replace(int ifindex, const char *kind, bool block)
+{
+ struct rtnl_handle rth = { .fd = -1 };
+ struct qdisc_req req;
+ int err;
+
+ err = rtnl_open(&rth, 0);
+ if (!ASSERT_OK(err, "open_rtnetlink"))
+ return err;
+
+ memset(&req, 0, sizeof(req));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST;
+ req.n.nlmsg_type = RTM_NEWQDISC;
+ req.t.tcm_family = AF_UNSPEC;
+ req.t.tcm_ifindex = ifindex;
+ req.t.tcm_parent = 0xfffffff1;
+
+ addattr_l(&req.n, sizeof(req), TCA_KIND, kind, strlen(kind) + 1);
+ if (block)
+ addattr32(&req.n, sizeof(req), TCA_INGRESS_BLOCK, 1);
+
+ err = rtnl_talk(&rth, &req.n, NULL);
+ ASSERT_OK(err, "talk_rtnetlink");
+ rtnl_close(&rth);
+ return err;
+}
+
+void serial_test_tc_links_dev_chain0(void)
+{
+ int err, ifindex;
+
+ ASSERT_OK(system("ip link add dev foo type veth peer name bar"), "add veth");
+ ifindex = if_nametoindex("foo");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+ err = qdisc_replace(ifindex, "ingress", true);
+ if (!ASSERT_OK(err, "attaching ingress"))
+ goto cleanup;
+ ASSERT_OK(system("tc filter add block 1 matchall action skbmod swap mac"), "add block");
+ err = qdisc_replace(ifindex, "clsact", false);
+ if (!ASSERT_OK(err, "attaching clsact"))
+ goto cleanup;
+ /* Heuristic: kern_sync_rcu() alone does not work; a wait time of ~5s
+ * reliably (100% of the time) triggered the issue without the fix.
+ */
+ sleep(5);
+ ASSERT_OK(system("tc filter add dev foo ingress matchall action skbmod swap mac"), "add filter");
+cleanup:
+ ASSERT_OK(system("ip link del dev foo"), "del veth");
+ ASSERT_EQ(if_nametoindex("foo"), 0, "foo removed");
+ ASSERT_EQ(if_nametoindex("bar"), 0, "bar removed");
+}
+
static void test_tc_links_dev_mixed(int target)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index b1073d36d77a..327d51f59142 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -890,9 +890,6 @@ static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P100));
- /* non mono delivery time is not forwarded */
- ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
- dtime_cnt_str(t, INGRESS_FWDNS_P101));
for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++)
ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
index ae93411fd582..09ca13bdf6ca 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
@@ -11,6 +11,7 @@ static int sanity_run(struct bpf_program *prog)
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
+ .flags = BPF_F_TEST_SKB_CHECKSUM_COMPLETE,
);
prog_fd = bpf_program__fd(prog);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
index 29e183a80f49..bbcf12696a6b 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -3,9 +3,12 @@
#include <test_progs.h>
#include <time.h>
+#include <sys/epoll.h>
+
#include "struct_ops_module.skel.h"
#include "struct_ops_nulled_out_cb.skel.h"
#include "struct_ops_forgotten_cb.skel.h"
+#include "struct_ops_detach.skel.h"
static void check_map_info(struct bpf_map_info *info)
{
@@ -242,6 +245,58 @@ cleanup:
struct_ops_forgotten_cb__destroy(skel);
}
+/* Detach a link from a user space program */
+static void test_detach_link(void)
+{
+ struct epoll_event ev, events[2];
+ struct struct_ops_detach *skel;
+ struct bpf_link *link = NULL;
+ int fd, epollfd = -1, nfds;
+ int err;
+
+ skel = struct_ops_detach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto cleanup;
+
+ fd = bpf_link__fd(link);
+ if (!ASSERT_GE(fd, 0, "link_fd"))
+ goto cleanup;
+
+ epollfd = epoll_create1(0);
+ if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
+ goto cleanup;
+
+ ev.events = EPOLLHUP;
+ ev.data.fd = fd;
+ err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
+ if (!ASSERT_OK(err, "epoll_ctl"))
+ goto cleanup;
+
+ err = bpf_link__detach(link);
+ if (!ASSERT_OK(err, "detach_link"))
+ goto cleanup;
+
+ /* Wait for EPOLLHUP */
+ nfds = epoll_wait(epollfd, events, 2, 500);
+ if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
+ goto cleanup;
+ if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
+ goto cleanup;
+
+cleanup:
+ if (epollfd >= 0)
+ close(epollfd);
+ bpf_link__destroy(link);
+ struct_ops_detach__destroy(skel);
+}
+
void serial_test_struct_ops_module(void)
{
if (test__start_subtest("struct_ops_load"))
@@ -254,5 +309,7 @@ void serial_test_struct_ops_module(void)
test_struct_ops_nulled_out_cb();
if (test__start_subtest("struct_ops_forgotten_cb"))
test_struct_ops_forgotten_cb();
+ if (test__start_subtest("test_detach_link"))
+ test_detach_link();
}
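Aside, not part of the patch: the EPOLLHUP notification that test_detach_link() above waits for with epoll can equally be observed with plain poll(2) on the link FD. A minimal sketch, assuming a struct bpf_link obtained via bpf_map__attach_struct_ops() as in the test; the helper name is hypothetical:

#include <poll.h>
#include <bpf/libbpf.h>

/* Hypothetical helper: block until the given BPF link is detached or
 * timeout_ms elapses. The kernel reports (E)POLLHUP on a link FD once the
 * underlying link has been detached.
 */
static int wait_for_link_detach(struct bpf_link *link, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = bpf_link__fd(link),
		.events = POLLHUP,
	};
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return -1;	/* error or timeout */
	return (pfd.revents & POLLHUP) ? 0 : -1;
}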
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
new file mode 100644
index 000000000000..871d16cb95cf
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <test_progs.h>
+#include <pthread.h>
+#include <network_helpers.h>
+
+#include "timer_lockup.skel.h"
+
+static long cpu;
+static int *timer1_err;
+static int *timer2_err;
+static bool skip;
+
+volatile int k = 0;
+
+static void *timer_lockup_thread(void *arg)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1000,
+ );
+ int i, prog_fd = *(int *)arg;
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
+ ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset),
+ &cpuset),
+ "cpu affinity");
+
+ for (i = 0; !READ_ONCE(*timer1_err) && !READ_ONCE(*timer2_err); i++) {
+ bpf_prog_test_run_opts(prog_fd, &opts);
+ /* Skip the test if we can't reproduce the race in a reasonable
+ * amount of time.
+ */
+ if (i > 50) {
+ WRITE_ONCE(skip, true);
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+void test_timer_lockup(void)
+{
+ int timer1_prog, timer2_prog;
+ struct timer_lockup *skel;
+ pthread_t thrds[2];
+ void *ret;
+
+ skel = timer_lockup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load"))
+ return;
+
+ timer1_prog = bpf_program__fd(skel->progs.timer1_prog);
+ timer2_prog = bpf_program__fd(skel->progs.timer2_prog);
+
+ timer1_err = &skel->bss->timer1_err;
+ timer2_err = &skel->bss->timer2_err;
+
+ if (!ASSERT_OK(pthread_create(&thrds[0], NULL, timer_lockup_thread,
+ &timer1_prog),
+ "pthread_create thread1"))
+ goto out;
+ if (!ASSERT_OK(pthread_create(&thrds[1], NULL, timer_lockup_thread,
+ &timer2_prog),
+ "pthread_create thread2")) {
+ pthread_exit(&thrds[0]);
+ goto out;
+ }
+
+ pthread_join(thrds[1], &ret);
+ pthread_join(thrds[0], &ret);
+
+ if (skip) {
+ test__skip();
+ goto out;
+ }
+
+ if (*timer1_err != -EDEADLK && *timer1_err != 0)
+ ASSERT_FAIL("timer1_err bad value");
+ if (*timer2_err != -EDEADLK && *timer2_err != 0)
+ ASSERT_FAIL("timer2_err bad value");
+out:
+ timer_lockup__destroy(skel);
+ return;
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
index fe0fb0c9849a..19e68d4b3532 100644
--- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
@@ -3,8 +3,9 @@
#include <test_progs.h>
#include "tracing_struct.skel.h"
+#include "tracing_struct_many_args.skel.h"
-static void test_fentry(void)
+static void test_struct_args(void)
{
struct tracing_struct *skel;
int err;
@@ -55,6 +56,25 @@ static void test_fentry(void)
ASSERT_EQ(skel->bss->t6, 1, "t6 ret");
+destroy_skel:
+ tracing_struct__destroy(skel);
+}
+
+static void test_struct_many_args(void)
+{
+ struct tracing_struct_many_args *skel;
+ int err;
+
+ skel = tracing_struct_many_args__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "tracing_struct_many_args__open_and_load"))
+ return;
+
+ err = tracing_struct_many_args__attach(skel);
+ if (!ASSERT_OK(err, "tracing_struct_many_args__attach"))
+ goto destroy_skel;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
ASSERT_EQ(skel->bss->t7_a, 16, "t7:a");
ASSERT_EQ(skel->bss->t7_b, 17, "t7:b");
ASSERT_EQ(skel->bss->t7_c, 18, "t7:c");
@@ -74,12 +94,28 @@ static void test_fentry(void)
ASSERT_EQ(skel->bss->t8_g, 23, "t8:g");
ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret");
- tracing_struct__detach(skel);
+ ASSERT_EQ(skel->bss->t9_a, 16, "t9:a");
+ ASSERT_EQ(skel->bss->t9_b, 17, "t9:b");
+ ASSERT_EQ(skel->bss->t9_c, 18, "t9:c");
+ ASSERT_EQ(skel->bss->t9_d, 19, "t9:d");
+ ASSERT_EQ(skel->bss->t9_e, 20, "t9:e");
+ ASSERT_EQ(skel->bss->t9_f, 21, "t9:f");
+ ASSERT_EQ(skel->bss->t9_g, 22, "t9:g");
+ ASSERT_EQ(skel->bss->t9_h_a, 23, "t9:h.a");
+ ASSERT_EQ(skel->bss->t9_h_b, 24, "t9:h.b");
+ ASSERT_EQ(skel->bss->t9_h_c, 25, "t9:h.c");
+ ASSERT_EQ(skel->bss->t9_h_d, 26, "t9:h.d");
+ ASSERT_EQ(skel->bss->t9_i, 27, "t9:i");
+ ASSERT_EQ(skel->bss->t9_ret, 258, "t9 ret");
+
destroy_skel:
- tracing_struct__destroy(skel);
+ tracing_struct_many_args__destroy(skel);
}
void test_tracing_struct(void)
{
- test_fentry();
+ if (test__start_subtest("struct_args"))
+ test_struct_args();
+ if (test__start_subtest("struct_many_args"))
+ test_struct_many_args();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
new file mode 100644
index 000000000000..bd8c75b620c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#ifdef __x86_64__
+
+#include <unistd.h>
+#include <asm/ptrace.h>
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <sys/prctl.h>
+#include <asm/prctl.h>
+#include "uprobe_syscall.skel.h"
+#include "uprobe_syscall_executed.skel.h"
+
+__naked unsigned long uretprobe_regs_trigger(void)
+{
+ asm volatile (
+ "movq $0xdeadbeef, %rax\n"
+ "ret\n"
+ );
+}
+
+__naked void uretprobe_regs(struct pt_regs *before, struct pt_regs *after)
+{
+ asm volatile (
+ "movq %r15, 0(%rdi)\n"
+ "movq %r14, 8(%rdi)\n"
+ "movq %r13, 16(%rdi)\n"
+ "movq %r12, 24(%rdi)\n"
+ "movq %rbp, 32(%rdi)\n"
+ "movq %rbx, 40(%rdi)\n"
+ "movq %r11, 48(%rdi)\n"
+ "movq %r10, 56(%rdi)\n"
+ "movq %r9, 64(%rdi)\n"
+ "movq %r8, 72(%rdi)\n"
+ "movq %rax, 80(%rdi)\n"
+ "movq %rcx, 88(%rdi)\n"
+ "movq %rdx, 96(%rdi)\n"
+ "movq %rsi, 104(%rdi)\n"
+ "movq %rdi, 112(%rdi)\n"
+ "movq $0, 120(%rdi)\n" /* orig_rax */
+ "movq $0, 128(%rdi)\n" /* rip */
+ "movq $0, 136(%rdi)\n" /* cs */
+ "pushf\n"
+ "pop %rax\n"
+ "movq %rax, 144(%rdi)\n" /* eflags */
+ "movq %rsp, 152(%rdi)\n" /* rsp */
+ "movq $0, 160(%rdi)\n" /* ss */
+
+ /* save 2nd argument */
+ "pushq %rsi\n"
+ "call uretprobe_regs_trigger\n"
+
+ /* save return value and load 2nd argument pointer to rax */
+ "pushq %rax\n"
+ "movq 8(%rsp), %rax\n"
+
+ "movq %r15, 0(%rax)\n"
+ "movq %r14, 8(%rax)\n"
+ "movq %r13, 16(%rax)\n"
+ "movq %r12, 24(%rax)\n"
+ "movq %rbp, 32(%rax)\n"
+ "movq %rbx, 40(%rax)\n"
+ "movq %r11, 48(%rax)\n"
+ "movq %r10, 56(%rax)\n"
+ "movq %r9, 64(%rax)\n"
+ "movq %r8, 72(%rax)\n"
+ "movq %rcx, 88(%rax)\n"
+ "movq %rdx, 96(%rax)\n"
+ "movq %rsi, 104(%rax)\n"
+ "movq %rdi, 112(%rax)\n"
+ "movq $0, 120(%rax)\n" /* orig_rax */
+ "movq $0, 128(%rax)\n" /* rip */
+ "movq $0, 136(%rax)\n" /* cs */
+
+ /* restore return value and 2nd argument */
+ "pop %rax\n"
+ "pop %rsi\n"
+
+ "movq %rax, 80(%rsi)\n"
+
+ "pushf\n"
+ "pop %rax\n"
+
+ "movq %rax, 144(%rsi)\n" /* eflags */
+ "movq %rsp, 152(%rsi)\n" /* rsp */
+ "movq $0, 160(%rsi)\n" /* ss */
+ "ret\n"
+);
+}
+
+static void test_uretprobe_regs_equal(void)
+{
+ struct uprobe_syscall *skel = NULL;
+ struct pt_regs before = {}, after = {};
+ unsigned long *pb = (unsigned long *) &before;
+ unsigned long *pa = (unsigned long *) &after;
+ unsigned long *pp;
+ unsigned int i, cnt;
+ int err;
+
+ skel = uprobe_syscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall__open_and_load"))
+ goto cleanup;
+
+ err = uprobe_syscall__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_syscall__attach"))
+ goto cleanup;
+
+ uretprobe_regs(&before, &after);
+
+ pp = (unsigned long *) &skel->bss->regs;
+ cnt = sizeof(before)/sizeof(*pb);
+
+ for (i = 0; i < cnt; i++) {
+ unsigned int offset = i * sizeof(unsigned long);
+
+ /*
+ * Compare each register before and after the uretprobe_regs_trigger
+ * call that triggers the uretprobe.
+ */
+ switch (offset) {
+ case offsetof(struct pt_regs, rax):
+ ASSERT_EQ(pa[i], 0xdeadbeef, "return value");
+ break;
+ default:
+ if (!ASSERT_EQ(pb[i], pa[i], "register before-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+
+ /*
+ * Compare each register as seen from the bpf program with its value
+ * after the uretprobe_regs_trigger call.
+ */
+ switch (offset) {
+ /*
+ * These values will be different (not set in uretprobe_regs),
+ * we don't care.
+ */
+ case offsetof(struct pt_regs, orig_rax):
+ case offsetof(struct pt_regs, rip):
+ case offsetof(struct pt_regs, cs):
+ case offsetof(struct pt_regs, rsp):
+ case offsetof(struct pt_regs, ss):
+ break;
+ default:
+ if (!ASSERT_EQ(pp[i], pa[i], "register prog-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+ }
+
+cleanup:
+ uprobe_syscall__destroy(skel);
+}
+
+#define BPF_TESTMOD_UPROBE_TEST_FILE "/sys/kernel/bpf_testmod_uprobe"
+
+static int write_bpf_testmod_uprobe(unsigned long offset)
+{
+ size_t n, ret;
+ char buf[30];
+ int fd;
+
+ n = sprintf(buf, "%lu", offset);
+
+ fd = open(BPF_TESTMOD_UPROBE_TEST_FILE, O_WRONLY);
+ if (fd < 0)
+ return -errno;
+
+ ret = write(fd, buf, n);
+ close(fd);
+ return ret != n ? (int) ret : 0;
+}
+
+static void test_uretprobe_regs_change(void)
+{
+ struct pt_regs before = {}, after = {};
+ unsigned long *pb = (unsigned long *) &before;
+ unsigned long *pa = (unsigned long *) &after;
+ unsigned long cnt = sizeof(before)/sizeof(*pb);
+ unsigned int i, err, offset;
+
+ offset = get_uprobe_offset(uretprobe_regs_trigger);
+
+ err = write_bpf_testmod_uprobe(offset);
+ if (!ASSERT_OK(err, "register_uprobe"))
+ return;
+
+ uretprobe_regs(&before, &after);
+
+ err = write_bpf_testmod_uprobe(0);
+ if (!ASSERT_OK(err, "unregister_uprobe"))
+ return;
+
+ for (i = 0; i < cnt; i++) {
+ unsigned int offset = i * sizeof(unsigned long);
+
+ switch (offset) {
+ case offsetof(struct pt_regs, rax):
+ ASSERT_EQ(pa[i], 0x12345678deadbeef, "rax");
+ break;
+ case offsetof(struct pt_regs, rcx):
+ ASSERT_EQ(pa[i], 0x87654321feebdaed, "rcx");
+ break;
+ case offsetof(struct pt_regs, r11):
+ ASSERT_EQ(pa[i], (__u64) -1, "r11");
+ break;
+ default:
+ if (!ASSERT_EQ(pa[i], pb[i], "register before-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+ }
+}
+
+#ifndef __NR_uretprobe
+#define __NR_uretprobe 467
+#endif
+
+__naked unsigned long uretprobe_syscall_call_1(void)
+{
+ /*
+ * Pretend we are uretprobe trampoline to trigger the return
+ * probe invocation in order to verify we get SIGILL.
+ */
+ asm volatile (
+ "pushq %rax\n"
+ "pushq %rcx\n"
+ "pushq %r11\n"
+ "movq $" __stringify(__NR_uretprobe) ", %rax\n"
+ "syscall\n"
+ "popq %r11\n"
+ "popq %rcx\n"
+ "retq\n"
+ );
+}
+
+__naked unsigned long uretprobe_syscall_call(void)
+{
+ asm volatile (
+ "call uretprobe_syscall_call_1\n"
+ "retq\n"
+ );
+}
+
+static void test_uretprobe_syscall_call(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+ .retprobe = true,
+ );
+ struct uprobe_syscall_executed *skel;
+ int pid, status, err, go[2], c;
+
+ if (!ASSERT_OK(pipe(go), "pipe"))
+ return;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ goto cleanup;
+
+ pid = fork();
+ if (!ASSERT_GE(pid, 0, "fork"))
+ goto cleanup;
+
+ /* child */
+ if (pid == 0) {
+ close(go[1]);
+
+ /* wait for parent's kick */
+ err = read(go[0], &c, 1);
+ if (err != 1)
+ exit(-1);
+
+ uretprobe_syscall_call();
+ _exit(0);
+ }
+
+ skel->links.test = bpf_program__attach_uprobe_multi(skel->progs.test, pid,
+ "/proc/self/exe",
+ "uretprobe_syscall_call", &opts);
+ if (!ASSERT_OK_PTR(skel->links.test, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ /* kick the child */
+ write(go[1], &c, 1);
+ err = waitpid(pid, &status, 0);
+ ASSERT_EQ(err, pid, "waitpid");
+
+ /* verify the child got killed with SIGILL */
+ ASSERT_EQ(WIFSIGNALED(status), 1, "WIFSIGNALED");
+ ASSERT_EQ(WTERMSIG(status), SIGILL, "WTERMSIG");
+
+ /* verify the uretprobe program wasn't called */
+ ASSERT_EQ(skel->bss->executed, 0, "executed");
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+ close(go[1]);
+ close(go[0]);
+}
+
+/*
+ * Borrowed from tools/testing/selftests/x86/test_shadow_stack.c.
+ *
+ * For use in inline enablement of shadow stack.
+ *
+ * The program can't return from the point where shadow stack gets enabled
+ * because there will be no address on the shadow stack. So it can't use
+ * syscall() for enablement, since it is a function.
+ *
+ * Based on code from nolibc.h. Keep a copy here because this can't pull
+ * in all of nolibc.h.
+ */
+#define ARCH_PRCTL(arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = __NR_arch_prctl; \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#ifndef ARCH_SHSTK_ENABLE
+#define ARCH_SHSTK_ENABLE 0x5001
+#define ARCH_SHSTK_DISABLE 0x5002
+#define ARCH_SHSTK_SHSTK (1ULL << 0)
+#endif
+
+static void test_uretprobe_shadow_stack(void)
+{
+ if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
+ test__skip();
+ return;
+ }
+
+ /* Run all of the uretprobe tests. */
+ test_uretprobe_regs_equal();
+ test_uretprobe_regs_change();
+ test_uretprobe_syscall_call();
+
+ ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
+}
+#else
+static void test_uretprobe_regs_equal(void)
+{
+ test__skip();
+}
+
+static void test_uretprobe_regs_change(void)
+{
+ test__skip();
+}
+
+static void test_uretprobe_syscall_call(void)
+{
+ test__skip();
+}
+
+static void test_uretprobe_shadow_stack(void)
+{
+ test__skip();
+}
+#endif
+
+void test_uprobe_syscall(void)
+{
+ if (test__start_subtest("uretprobe_regs_equal"))
+ test_uretprobe_regs_equal();
+ if (test__start_subtest("uretprobe_regs_change"))
+ test_uretprobe_regs_change();
+ if (test__start_subtest("uretprobe_syscall_call"))
+ test_uretprobe_syscall_call();
+ if (test__start_subtest("uretprobe_shadow_stack"))
+ test_uretprobe_shadow_stack();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c b/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
new file mode 100644
index 000000000000..6deb8d560ddd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "uretprobe_stack.skel.h"
+#include "../sdt.h"
+
+/* We set up a target_1() -> target_2() -> target_3() -> target_4() -> USDT()
+ * call chain, each link being traced by our BPF program. On entry to or
+ * return from each target_*() we capture a user stack trace and record it in
+ * a global variable, so that the user space part of the test can validate it.
+ *
+ * Note, we put each target function into a custom section to get the
+ * __start_XXX/__stop_XXX symbols generated by the linker for us, which let
+ * us know the address range of each function.
+ */
+__attribute__((section("uprobe__target_4")))
+__weak int target_4(void)
+{
+ STAP_PROBE1(uretprobe_stack, target, 42);
+ return 42;
+}
+
+extern const void *__start_uprobe__target_4;
+extern const void *__stop_uprobe__target_4;
+
+__attribute__((section("uprobe__target_3")))
+__weak int target_3(void)
+{
+ return target_4();
+}
+
+extern const void *__start_uprobe__target_3;
+extern const void *__stop_uprobe__target_3;
+
+__attribute__((section("uprobe__target_2")))
+__weak int target_2(void)
+{
+ return target_3();
+}
+
+extern const void *__start_uprobe__target_2;
+extern const void *__stop_uprobe__target_2;
+
+__attribute__((section("uprobe__target_1")))
+__weak int target_1(int depth)
+{
+ if (depth < 1)
+ return 1 + target_1(depth + 1);
+ else
+ return target_2();
+}
+
+extern const void *__start_uprobe__target_1;
+extern const void *__stop_uprobe__target_1;
+
+extern const void *__start_uretprobe_stack_sec;
+extern const void *__stop_uretprobe_stack_sec;
+
+struct range {
+ long start;
+ long stop;
+};
+
+static struct range targets[] = {
+ {}, /* we want target_1 to map to targets[1], so need 1-based indexing */
+ { (long)&__start_uprobe__target_1, (long)&__stop_uprobe__target_1 },
+ { (long)&__start_uprobe__target_2, (long)&__stop_uprobe__target_2 },
+ { (long)&__start_uprobe__target_3, (long)&__stop_uprobe__target_3 },
+ { (long)&__start_uprobe__target_4, (long)&__stop_uprobe__target_4 },
+};
+
+static struct range caller = {
+ (long)&__start_uretprobe_stack_sec,
+ (long)&__stop_uretprobe_stack_sec,
+};
+
+static void validate_stack(__u64 *ips, int stack_len, int cnt, ...)
+{
+ int i, j;
+ va_list args;
+
+ if (!ASSERT_GT(stack_len, 0, "stack_len"))
+ return;
+
+ stack_len /= 8;
+
+ /* check if we have enough entries to satisfy test expectations */
+ if (!ASSERT_GE(stack_len, cnt, "stack_len2"))
+ return;
+
+ if (env.verbosity >= VERBOSE_NORMAL) {
+ printf("caller: %#lx - %#lx\n", caller.start, caller.stop);
+ for (i = 1; i < ARRAY_SIZE(targets); i++)
+ printf("target_%d: %#lx - %#lx\n", i, targets[i].start, targets[i].stop);
+ for (i = 0; i < stack_len; i++) {
+ for (j = 1; j < ARRAY_SIZE(targets); j++) {
+ if (ips[i] >= targets[j].start && ips[i] < targets[j].stop)
+ break;
+ }
+ if (j < ARRAY_SIZE(targets)) { /* found target match */
+ printf("ENTRY #%d: %#lx (in target_%d)\n", i, (long)ips[i], j);
+ } else if (ips[i] >= caller.start && ips[i] < caller.stop) {
+ printf("ENTRY #%d: %#lx (in caller)\n", i, (long)ips[i]);
+ } else {
+ printf("ENTRY #%d: %#lx\n", i, (long)ips[i]);
+ }
+ }
+ }
+
+ va_start(args, cnt);
+
+ for (i = cnt - 1; i >= 0; i--) {
+ /* most recent entry is the deepest target function */
+ const struct range *t = va_arg(args, const struct range *);
+
+ ASSERT_GE(ips[i], t->start, "addr_start");
+ ASSERT_LT(ips[i], t->stop, "addr_stop");
+ }
+
+ va_end(args);
+}
+
+/* __weak prevents inlining */
+__attribute__((section("uretprobe_stack_sec")))
+__weak void test_uretprobe_stack(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct uretprobe_stack *skel;
+ int err;
+
+ skel = uretprobe_stack__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ err = uretprobe_stack__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ ASSERT_EQ(target_1(0), 42 + 1, "trigger_return");
+
+ /*
+ * Stacks captured on ENTRY uprobes
+ */
+
+ /* (uprobe 1) target_1 in stack trace */
+ validate_stack(skel->bss->entry_stack1, skel->bss->entry1_len,
+ 2, &caller, &targets[1]);
+ /* (uprobe 1, recursed) */
+ validate_stack(skel->bss->entry_stack1_recur, skel->bss->entry1_recur_len,
+ 3, &caller, &targets[1], &targets[1]);
+ /* (uprobe 2) caller -> target_1 -> target_1 -> target_2 */
+ validate_stack(skel->bss->entry_stack2, skel->bss->entry2_len,
+ 4, &caller, &targets[1], &targets[1], &targets[2]);
+ /* (uprobe 3) */
+ validate_stack(skel->bss->entry_stack3, skel->bss->entry3_len,
+ 5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
+ /* (uprobe 4) caller -> target_1 -> target_1 -> target_2 -> target_3 -> target_4 */
+ validate_stack(skel->bss->entry_stack4, skel->bss->entry4_len,
+ 6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);
+
+ /* (USDT): full caller -> target_1 -> target_1 -> target_2 (uretprobed)
+ * -> target_3 -> target_4 (uretprobes) chain
+ */
+ validate_stack(skel->bss->usdt_stack, skel->bss->usdt_len,
+ 6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);
+
+ /*
+ * Now stacks captured on the way out in EXIT uprobes
+ */
+
+ /* (uretprobe 4) everything up to target_4, but excluding it */
+ validate_stack(skel->bss->exit_stack4, skel->bss->exit4_len,
+ 5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
+ /* we didn't install uretprobes on target_2 and target_3 */
+ /* (uretprobe 1, recur) first target_1 call only */
+ validate_stack(skel->bss->exit_stack1_recur, skel->bss->exit1_recur_len,
+ 2, &caller, &targets[1]);
+ /* (uretprobe 1) just a caller in the stack trace */
+ validate_stack(skel->bss->exit_stack1, skel->bss->exit1_len,
+ 1, &caller);
+
+cleanup:
+ uretprobe_stack__destroy(skel);
+}
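Aside, not part of the patch: the __start_SECNAME/__stop_SECNAME symbols relied on above are generated by the GNU linker for any section whose name is a valid C identifier. A small standalone sketch of the same trick, independent of the BPF selftest harness (section and function names are illustrative):

#include <stdio.h>

/* Place a function in a custom, C-identifier-named section so the linker
 * emits __start_demo_sec/__stop_demo_sec bounding symbols for it.
 */
__attribute__((section("demo_sec")))
int demo_fn(void)
{
	return 42;
}

extern const char __start_demo_sec[];
extern const char __stop_demo_sec[];

int main(void)
{
	/* The two symbols give the address range covered by the section */
	printf("demo_sec: %p - %p (len %zu)\n",
	       (void *)__start_demo_sec, (void *)__stop_demo_sec,
	       (size_t)(__stop_demo_sec - __start_demo_sec));
	return demo_fn() == 42 ? 0 : 1;
}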
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 98ef39efa77e..9dc3687bc406 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -87,6 +87,7 @@
#include "verifier_xadd.skel.h"
#include "verifier_xdp.skel.h"
#include "verifier_xdp_direct_packet_access.skel.h"
+#include "verifier_bits_iter.skel.h"
#define MAX_ENTRIES 11
@@ -204,6 +205,7 @@ void test_verifier_var_off(void) { RUN(verifier_var_off); }
void test_verifier_xadd(void) { RUN(verifier_xadd); }
void test_verifier_xdp(void) { RUN(verifier_xdp); }
void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
+void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index f09505f8b038..53d6ad8c2257 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -222,7 +222,7 @@ static void test_xdp_adjust_frags_tail_grow(void)
prog = bpf_object__next_program(obj, NULL);
if (bpf_object__load(obj))
- return;
+ goto out;
prog_fd = bpf_program__fd(prog);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
new file mode 100644
index 000000000000..e1bf141d3401
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <bpf/btf.h>
+#include <linux/if_link.h>
+#include <linux/udp.h>
+#include <net/if.h>
+#include <unistd.h>
+
+#include "xdp_flowtable.skel.h"
+
+#define TX_NETNS_NAME "ns0"
+#define RX_NETNS_NAME "ns1"
+
+#define TX_NAME "v0"
+#define FORWARD_NAME "v1"
+#define RX_NAME "d0"
+
+#define TX_MAC "00:00:00:00:00:01"
+#define FORWARD_MAC "00:00:00:00:00:02"
+#define RX_MAC "00:00:00:00:00:03"
+#define DST_MAC "00:00:00:00:00:04"
+
+#define TX_ADDR "10.0.0.1"
+#define FORWARD_ADDR "10.0.0.2"
+#define RX_ADDR "20.0.0.1"
+#define DST_ADDR "20.0.0.2"
+
+#define PREFIX_LEN "8"
+#define N_PACKETS 10
+#define UDP_PORT 12345
+#define UDP_PORT_STR "12345"
+
+static int send_udp_traffic(void)
+{
+ struct sockaddr_storage addr;
+ int i, sock;
+
+ if (make_sockaddr(AF_INET, DST_ADDR, UDP_PORT, &addr, NULL))
+ return -EINVAL;
+
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0)
+ return sock;
+
+ for (i = 0; i < N_PACKETS; i++) {
+ unsigned char buf[] = { 0xaa, 0xbb, 0xcc };
+ int n;
+
+ n = sendto(sock, buf, sizeof(buf), MSG_NOSIGNAL | MSG_CONFIRM,
+ (struct sockaddr *)&addr, sizeof(addr));
+ if (n != sizeof(buf)) {
+ close(sock);
+ return -EINVAL;
+ }
+
+ usleep(50000); /* 50ms */
+ }
+ close(sock);
+
+ return 0;
+}
+
+void test_xdp_flowtable(void)
+{
+ struct xdp_flowtable *skel = NULL;
+ struct nstoken *tok = NULL;
+ int iifindex, stats_fd;
+ __u32 value, key = 0;
+ struct bpf_link *link;
+
+ if (SYS_NOFAIL("nft -v")) {
+ fprintf(stdout, "Missing required nft tool\n");
+ test__skip();
+ return;
+ }
+
+ SYS(out, "ip netns add " TX_NETNS_NAME);
+ SYS(out, "ip netns add " RX_NETNS_NAME);
+
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ SYS(out, "sysctl -qw net.ipv4.conf.all.forwarding=1");
+
+ SYS(out, "ip link add " TX_NAME " type veth peer " FORWARD_NAME);
+ SYS(out, "ip link set " TX_NAME " netns " TX_NETNS_NAME);
+ SYS(out, "ip link set dev " FORWARD_NAME " address " FORWARD_MAC);
+ SYS(out,
+ "ip addr add " FORWARD_ADDR "/" PREFIX_LEN " dev " FORWARD_NAME);
+ SYS(out, "ip link set dev " FORWARD_NAME " up");
+
+ SYS(out, "ip link add " RX_NAME " type dummy");
+ SYS(out, "ip link set dev " RX_NAME " address " RX_MAC);
+ SYS(out, "ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME);
+ SYS(out, "ip link set dev " RX_NAME " up");
+
+ /* configure the flowtable */
+ SYS(out, "nft add table ip filter");
+ SYS(out,
+ "nft add flowtable ip filter f { hook ingress priority 0\\; "
+ "devices = { " FORWARD_NAME ", " RX_NAME " }\\; }");
+ SYS(out,
+ "nft add chain ip filter forward "
+ "{ type filter hook forward priority 0\\; }");
+ SYS(out,
+ "nft add rule ip filter forward ip protocol udp th dport "
+ UDP_PORT_STR " flow add @f");
+
+ /* Avoid ARP calls */
+ SYS(out,
+ "ip -4 neigh add " DST_ADDR " lladdr " DST_MAC " dev " RX_NAME);
+
+ close_netns(tok);
+ tok = open_netns(TX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ SYS(out, "ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME);
+ SYS(out, "ip link set dev " TX_NAME " address " TX_MAC);
+ SYS(out, "ip link set dev " TX_NAME " up");
+ SYS(out, "ip route add default via " FORWARD_ADDR);
+
+ close_netns(tok);
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ iifindex = if_nametoindex(FORWARD_NAME);
+ if (!ASSERT_NEQ(iifindex, 0, "iifindex"))
+ goto out;
+
+ skel = xdp_flowtable__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto out;
+
+ link = bpf_program__attach_xdp(skel->progs.xdp_flowtable_do_lookup,
+ iifindex);
+ if (!ASSERT_OK_PTR(link, "prog_attach"))
+ goto out;
+
+ close_netns(tok);
+ tok = open_netns(TX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ if (!ASSERT_OK(send_udp_traffic(), "send udp"))
+ goto out;
+
+ close_netns(tok);
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ stats_fd = bpf_map__fd(skel->maps.stats);
+ if (!ASSERT_OK(bpf_map_lookup_elem(stats_fd, &key, &value),
+ "bpf_map_update_elem stats"))
+ goto out;
+
+ ASSERT_GE(value, N_PACKETS - 2, "bpf_xdp_flow_lookup failed");
+out:
+ xdp_flowtable__destroy(skel);
+ if (tok)
+ close_netns(tok);
+ SYS_NOFAIL("ip netns del " TX_NETNS_NAME);
+ SYS_NOFAIL("ip netns del " RX_NETNS_NAME);
+}
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
index 55f10563208d..bb0acd79d28a 100644
--- a/tools/testing/selftests/bpf/progs/arena_atomics.c
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -25,20 +25,13 @@ bool skip_tests = true;
__u32 pid = 0;
-#undef __arena
-#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
-#define __arena __attribute__((address_space(1)))
-#else
-#define __arena SEC(".addr_space.1")
-#endif
-
-__u64 __arena add64_value = 1;
-__u64 __arena add64_result = 0;
-__u32 __arena add32_value = 1;
-__u32 __arena add32_result = 0;
-__u64 __arena add_stack_value_copy = 0;
-__u64 __arena add_stack_result = 0;
-__u64 __arena add_noreturn_value = 1;
+__u64 __arena_global add64_value = 1;
+__u64 __arena_global add64_result = 0;
+__u32 __arena_global add32_value = 1;
+__u32 __arena_global add32_result = 0;
+__u64 __arena_global add_stack_value_copy = 0;
+__u64 __arena_global add_stack_result = 0;
+__u64 __arena_global add_noreturn_value = 1;
SEC("raw_tp/sys_enter")
int add(const void *ctx)
@@ -58,13 +51,13 @@ int add(const void *ctx)
return 0;
}
-__s64 __arena sub64_value = 1;
-__s64 __arena sub64_result = 0;
-__s32 __arena sub32_value = 1;
-__s32 __arena sub32_result = 0;
-__s64 __arena sub_stack_value_copy = 0;
-__s64 __arena sub_stack_result = 0;
-__s64 __arena sub_noreturn_value = 1;
+__s64 __arena_global sub64_value = 1;
+__s64 __arena_global sub64_result = 0;
+__s32 __arena_global sub32_value = 1;
+__s32 __arena_global sub32_result = 0;
+__s64 __arena_global sub_stack_value_copy = 0;
+__s64 __arena_global sub_stack_result = 0;
+__s64 __arena_global sub_noreturn_value = 1;
SEC("raw_tp/sys_enter")
int sub(const void *ctx)
@@ -84,8 +77,8 @@ int sub(const void *ctx)
return 0;
}
-__u64 __arena and64_value = (0x110ull << 32);
-__u32 __arena and32_value = 0x110;
+__u64 __arena_global and64_value = (0x110ull << 32);
+__u32 __arena_global and32_value = 0x110;
SEC("raw_tp/sys_enter")
int and(const void *ctx)
@@ -101,8 +94,8 @@ int and(const void *ctx)
return 0;
}
-__u32 __arena or32_value = 0x110;
-__u64 __arena or64_value = (0x110ull << 32);
+__u32 __arena_global or32_value = 0x110;
+__u64 __arena_global or64_value = (0x110ull << 32);
SEC("raw_tp/sys_enter")
int or(const void *ctx)
@@ -117,8 +110,8 @@ int or(const void *ctx)
return 0;
}
-__u64 __arena xor64_value = (0x110ull << 32);
-__u32 __arena xor32_value = 0x110;
+__u64 __arena_global xor64_value = (0x110ull << 32);
+__u32 __arena_global xor32_value = 0x110;
SEC("raw_tp/sys_enter")
int xor(const void *ctx)
@@ -133,12 +126,12 @@ int xor(const void *ctx)
return 0;
}
-__u32 __arena cmpxchg32_value = 1;
-__u32 __arena cmpxchg32_result_fail = 0;
-__u32 __arena cmpxchg32_result_succeed = 0;
-__u64 __arena cmpxchg64_value = 1;
-__u64 __arena cmpxchg64_result_fail = 0;
-__u64 __arena cmpxchg64_result_succeed = 0;
+__u32 __arena_global cmpxchg32_value = 1;
+__u32 __arena_global cmpxchg32_result_fail = 0;
+__u32 __arena_global cmpxchg32_result_succeed = 0;
+__u64 __arena_global cmpxchg64_value = 1;
+__u64 __arena_global cmpxchg64_result_fail = 0;
+__u64 __arena_global cmpxchg64_result_succeed = 0;
SEC("raw_tp/sys_enter")
int cmpxchg(const void *ctx)
@@ -156,10 +149,10 @@ int cmpxchg(const void *ctx)
return 0;
}
-__u64 __arena xchg64_value = 1;
-__u64 __arena xchg64_result = 0;
-__u32 __arena xchg32_value = 1;
-__u32 __arena xchg32_result = 0;
+__u64 __arena_global xchg64_value = 1;
+__u64 __arena_global xchg64_result = 0;
+__u32 __arena_global xchg32_value = 1;
+__u32 __arena_global xchg32_result = 0;
SEC("raw_tp/sys_enter")
int xchg(const void *ctx)
@@ -176,3 +169,79 @@ int xchg(const void *ctx)
return 0;
}
+
+__u64 __arena_global uaf_sink;
+volatile __u64 __arena_global uaf_recovery_fails;
+
+SEC("syscall")
+int uaf(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
+ !defined(__TARGET_ARCH_x86)
+ __u32 __arena *page32;
+ __u64 __arena *page64;
+ void __arena *page;
+
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ bpf_arena_free_pages(&arena, page, 1);
+ uaf_recovery_fails = 24;
+
+ page32 = (__u32 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page32, 1);
+ uaf_recovery_fails -= 1;
+
+ page64 = (__u64 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page64, 1);
+ uaf_recovery_fails -= 1;
+#endif
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
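For context, an assumption not shown in this diff: the per-file __arena redefinition removed above is superseded by __arena_global from a shared selftests header. Based on the removed lines, its definition presumably looks roughly like this sketch:

/* Presumed central definition (assumption); the block removed above shows
 * the two variants it has to cover.
 */
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
#define __arena_global __attribute__((address_space(1)))
#else
#define __arena_global SEC(".addr_space.1")
#endif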
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c
index 1e6ac187a6a0..81eaa94afeb0 100644
--- a/tools/testing/selftests/bpf/progs/arena_htab.c
+++ b/tools/testing/selftests/bpf/progs/arena_htab.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -18,25 +19,35 @@ void __arena *htab_for_user;
bool skip = false;
int zero = 0;
+char __arena arr1[100000];
+char arr2[1000];
SEC("syscall")
int arena_htab_llvm(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM)
struct htab __arena *htab;
+ char __arena *arr = arr1;
__u64 i;
htab = bpf_alloc(sizeof(*htab));
cast_kern(htab);
htab_init(htab);
+ cast_kern(arr);
+
/* first run. No old elems in the table */
- for (i = zero; i < 1000; i++)
+ for (i = zero; i < 100000 && can_loop; i++) {
htab_update_elem(htab, i, i);
+ arr[i] = i;
+ }
- /* should replace all elems with new ones */
- for (i = zero; i < 1000; i++)
+ /* should replace some elems with new ones */
+ for (i = zero; i < 1000 && can_loop; i++) {
htab_update_elem(htab, i, i);
+ /* Access mem to make the verifier use bounded loop logic */
+ arr2[i] = i;
+ }
cast_user(htab);
htab_for_user = htab;
#else
diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c
index 93bd0600eba0..3a2ddcacbea6 100644
--- a/tools/testing/selftests/bpf/progs/arena_list.c
+++ b/tools/testing/selftests/bpf/progs/arena_list.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 3c9ffe340312..02f552e7fd4d 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -65,7 +65,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca)
}
SEC("struct_ops")
-void BPF_PROG(dctcp_init, struct sock *sk)
+void BPF_PROG(bpf_dctcp_init, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -77,7 +77,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
(void *)fallback, sizeof(fallback)) == -EBUSY)
ebusy_cnt++;
- /* Switch back to myself and the recurred dctcp_init()
+ /* Switch back to myself and the recurred bpf_dctcp_init()
* will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION),
* except the last "cdg" one.
*/
@@ -112,7 +112,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
}
SEC("struct_ops")
-__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
+__u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk)
{
struct bpf_dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -122,7 +122,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
}
SEC("struct_ops")
-void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
+void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -161,12 +161,12 @@ static void dctcp_react_to_loss(struct sock *sk)
}
SEC("struct_ops")
-void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
+void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
dctcp_react_to_loss(sk);
- /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+ /* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only
* one loss-adjustment per RTT.
*/
}
@@ -208,7 +208,7 @@ static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
}
SEC("struct_ops")
-void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
+void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -227,7 +227,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
}
SEC("struct_ops")
-__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
+__u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk)
{
const struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -237,28 +237,28 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
SEC("struct_ops")
-void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
tcp_reno_cong_avoid(sk, ack, acked);
}
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
- .init = (void *)dctcp_init,
- .set_state = (void *)dctcp_state,
+ .init = (void *)bpf_dctcp_init,
+ .set_state = (void *)bpf_dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp_nouse",
};
SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
- .init = (void *)dctcp_init,
- .in_ack_event = (void *)dctcp_update_alpha,
- .cwnd_event = (void *)dctcp_cwnd_event,
- .ssthresh = (void *)dctcp_ssthresh,
- .cong_avoid = (void *)dctcp_cong_avoid,
- .undo_cwnd = (void *)dctcp_cwnd_undo,
- .set_state = (void *)dctcp_state,
+ .init = (void *)bpf_dctcp_init,
+ .in_ack_event = (void *)bpf_dctcp_update_alpha,
+ .cwnd_event = (void *)bpf_dctcp_cwnd_event,
+ .ssthresh = (void *)bpf_dctcp_ssthresh,
+ .cong_avoid = (void *)bpf_dctcp_cong_avoid,
+ .undo_cwnd = (void *)bpf_dctcp_cwnd_undo,
+ .set_state = (void *)bpf_dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp",
};
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
index c5969ca6f26b..564835ba7d51 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
@@ -6,12 +6,6 @@
char _license[] SEC("license") = "GPL";
-struct key_t {
- int a;
- int b;
- int c;
-};
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 3);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
index 85fa710fad90..9f0e0705b2bf 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
@@ -6,12 +6,6 @@
char _license[] SEC("license") = "GPL";
-struct key_t {
- int a;
- int b;
- int c;
-};
-
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 3);
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index fb2f5513e29e..81097a3f15eb 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -7,9 +7,9 @@
*
* The test_loader sequentially loads each program in a skeleton.
* Programs could be loaded in privileged and unprivileged modes.
- * - __success, __failure, __msg imply privileged mode;
- * - __success_unpriv, __failure_unpriv, __msg_unpriv imply
- * unprivileged mode.
+ * - __success, __failure, __msg, __regex imply privileged mode;
+ * - __success_unpriv, __failure_unpriv, __msg_unpriv, __regex_unpriv
+ * imply unprivileged mode.
* If combination of privileged and unprivileged attributes is present
* both modes are used. If none are present privileged mode is implied.
*
@@ -24,6 +24,9 @@
* Multiple __msg attributes could be specified.
* __msg_unpriv Same as __msg but for unprivileged mode.
*
+ * __regex Same as __msg, but using a regular expression.
+ * __regex_unpriv Same as __msg_unpriv but using a regular expression.
+ *
* __success Expect program load success in privileged mode.
* __success_unpriv Expect program load success in unprivileged mode.
*
@@ -59,10 +62,12 @@
* __auxiliary_unpriv Same, but load program in unprivileged mode.
*/
#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
+#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex)))
#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg)))
+#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex)))
#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
@@ -135,4 +140,8 @@
/* make it look to compiler like value is read and written */
#define __sink(expr) asm volatile("" : "+g"(expr))
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
#endif
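Illustrative only, not part of the patch: the new __regex/__regex_unpriv tags are consumed by the test_loader framework the same way as __msg, but the expected verifier output is matched as a regular expression rather than a fixed substring. A hypothetical annotated program might look like the sketch below; the program body and the exact verifier message are assumptions, only the __regex tag itself comes from the change above.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("hypothetical __regex usage")
__failure __regex("R[0-9]+ invalid mem access 'scalar'")
int regex_tag_example(void *ctx)
{
	/* deliberately dereference a scalar so that verification fails; the
	 * register number in the error message is matched by the regex above
	 */
	return *(volatile int *)1;
}

char _license[] SEC("license") = "GPL";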
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
index 7a1e64c6c065..fd8106831c32 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_success.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -12,6 +12,31 @@ char _license[] SEC("license") = "GPL";
int pid, nr_cpus;
+struct kptr_nested {
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_pair {
+ struct bpf_cpumask __kptr * mask_1;
+ struct bpf_cpumask __kptr * mask_2;
+};
+
+struct kptr_nested_mid {
+ int dummy;
+ struct kptr_nested m;
+};
+
+struct kptr_nested_deep {
+ struct kptr_nested_mid ptrs[2];
+ struct kptr_nested_pair ptr_pairs[3];
+};
+
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
+private(MASK) static struct kptr_nested global_mask_nested[2];
+private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
+
static bool is_test_task(void)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
@@ -461,6 +486,152 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
}
SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Kptr arrays with one element are special cased, being treated
+ * just like a single pointer.
+ */
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask_array_one[0];
+ if (!local) {
+ err = 4;
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
+ struct bpf_cpumask **mask1)
+{
+ struct bpf_cpumask *local;
+
+ if (!is_test_task())
+ return 0;
+
+	/* Check that two kptrs in the array work independently */
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ bpf_rcu_read_lock();
+
+ local = bpf_kptr_xchg(mask0, local);
+ if (local) {
+ err = 1;
+ goto err_exit;
+ }
+
+ /* [<mask 0>, NULL] */
+ if (!*mask0 || *mask1) {
+ err = 2;
+ goto err_exit;
+ }
+
+ local = create_cpumask();
+ if (!local) {
+ err = 9;
+ goto err_exit;
+ }
+
+ local = bpf_kptr_xchg(mask1, local);
+ if (local) {
+ err = 10;
+ goto err_exit;
+ }
+
+ /* [<mask 0>, <mask 1>] */
+ if (!*mask0 || !*mask1 || *mask0 == *mask1) {
+ err = 11;
+ goto err_exit;
+ }
+
+err_exit:
+ if (local)
+ bpf_cpumask_release(local);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
+}
+
+/* Ensure that the field->offset has been correctly advanced from one
+ * nested struct or array sub-tree to another. In the case of
+ * kptr_nested_deep, it comprises two sub-trees: ptrs[] and ptr_pairs[]. By
+ * calling bpf_kptr_xchg() on every single kptr in both nested sub-trees,
+ * the verifier should reject the program if the field->offset of any kptr
+ * is incorrect.
+ *
+ * For instance, if we have 10 kptrs in a nested struct and a program that
+ * accesses each kptr individually with bpf_kptr_xchg(), the compiler
+ * should emit instructions to access 10 different offsets if it works
+ * correctly. If the field->offset values of any two of them are
+ * incorrectly the same, the number of unique offsets in the btf_record for
+ * this nested struct will be less than 10, and the verifier will fail to
+ * find some of the offsets emitted by the compiler.
+ *
+ * Even if the field->offset values of the kptrs are not duplicated, the
+ * verifier will fail to find a btf_field for the instruction accessing a
+ * kptr if the corresponding field->offset points to a random, incorrect
+ * offset.
+ */
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
+{
+ int r, i;
+
+ r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
+ &global_mask_nested_deep.ptrs[1].m.mask);
+ if (r)
+ return r;
+
+ for (i = 0; i < 3; i++) {
+ r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1,
+ &global_mask_nested_deep.ptr_pairs[i].mask_2);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *local;
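
All of the array and nested-struct cases added above reduce to the same ownership pattern: bpf_kptr_xchg() atomically swaps the pointer stored in a kptr slot with a new one and returns the previous value, which the program must release. A minimal sketch of that pattern, assuming a single private(...) slot named global_mask (a stand-in for any of the slots above):

/* Sketch only; global_mask stands in for any kptr slot declared above. */
static int publish_mask(struct bpf_cpumask *fresh)
{
	struct bpf_cpumask *old;

	/* ownership of 'fresh' moves into the slot */
	old = bpf_kptr_xchg(&global_mask, fresh);
	if (old)
		/* release the reference handed back to us */
		bpf_cpumask_release(old);
	return 0;
}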
diff --git a/tools/testing/selftests/bpf/progs/crypto_bench.c b/tools/testing/selftests/bpf/progs/crypto_bench.c
index e61fe0882293..4ac956b26240 100644
--- a/tools/testing/selftests/bpf/progs/crypto_bench.c
+++ b/tools/testing/selftests/bpf/progs/crypto_bench.c
@@ -57,7 +57,7 @@ int crypto_encrypt(struct __sk_buff *skb)
{
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
- struct bpf_dynptr psrc, pdst, iv;
+ struct bpf_dynptr psrc, pdst;
v = crypto_ctx_value_lookup();
if (!v) {
@@ -73,9 +73,8 @@ int crypto_encrypt(struct __sk_buff *skb)
bpf_dynptr_from_skb(skb, 0, &psrc);
bpf_dynptr_from_mem(dst, len, 0, &pdst);
- bpf_dynptr_from_mem(dst, 0, 0, &iv);
- status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv);
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
__sync_add_and_fetch(&hits, 1);
return 0;
@@ -84,7 +83,7 @@ int crypto_encrypt(struct __sk_buff *skb)
SEC("tc")
int crypto_decrypt(struct __sk_buff *skb)
{
- struct bpf_dynptr psrc, pdst, iv;
+ struct bpf_dynptr psrc, pdst;
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
@@ -98,9 +97,8 @@ int crypto_decrypt(struct __sk_buff *skb)
bpf_dynptr_from_skb(skb, 0, &psrc);
bpf_dynptr_from_mem(dst, len, 0, &pdst);
- bpf_dynptr_from_mem(dst, 0, 0, &iv);
- status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv);
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
__sync_add_and_fetch(&hits, 1);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
index 1be0a3fa5efd..645be6cddf36 100644
--- a/tools/testing/selftests/bpf/progs/crypto_sanity.c
+++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
@@ -89,7 +89,7 @@ int decrypt_sanity(struct __sk_buff *skb)
{
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
- struct bpf_dynptr psrc, pdst, iv;
+ struct bpf_dynptr psrc, pdst;
int err;
err = skb_dynptr_validate(skb, &psrc);
@@ -114,12 +114,8 @@ int decrypt_sanity(struct __sk_buff *skb)
* production code, a percpu map should be used to store the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
- /* iv dynptr has to be initialized with 0 size, but proper memory region
- * has to be provided anyway
- */
- bpf_dynptr_from_mem(dst, 0, 0, &iv);
- status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv);
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
return TC_ACT_SHOT;
}
@@ -129,7 +125,7 @@ int encrypt_sanity(struct __sk_buff *skb)
{
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
- struct bpf_dynptr psrc, pdst, iv;
+ struct bpf_dynptr psrc, pdst;
int err;
status = 0;
@@ -156,12 +152,8 @@ int encrypt_sanity(struct __sk_buff *skb)
* production code, a percpu map should be used to store the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
- /* iv dynptr has to be initialized with 0 size, but proper memory region
- * has to be provided anyway
- */
- bpf_dynptr_from_mem(dst, 0, 0, &iv);
- status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv);
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
return TC_ACT_SHOT;
}
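
Both crypto selftests change for the same reason: the IV argument of bpf_crypto_encrypt() and bpf_crypto_decrypt() is now nullable, so ciphers that need no IV can pass NULL instead of building a zero-sized dynptr over a dummy buffer. The call site reduces to a sketch like:

/* psrc/pdst are set up as above; NULL means no IV is supplied */
status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);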
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 66a60bfb5867..e35bc1eac52a 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -964,7 +964,7 @@ int dynptr_invalidate_slice_reinit(void *ctx)
* mem_or_null pointers.
*/
SEC("?raw_tp")
-__failure __msg("R1 type=scalar expected=percpu_ptr_")
+__failure __regex("R[0-9]+ type=scalar expected=percpu_ptr_")
int dynptr_invalidate_slice_or_null(void *ctx)
{
struct bpf_dynptr ptr;
@@ -982,7 +982,7 @@ int dynptr_invalidate_slice_or_null(void *ctx)
/* Destruction of dynptr should also invalidate any slices obtained from it */
SEC("?raw_tp")
-__failure __msg("R7 invalid mem access 'scalar'")
+__failure __regex("R[0-9]+ invalid mem access 'scalar'")
int dynptr_invalidate_slice_failure(void *ctx)
{
struct bpf_dynptr ptr1;
@@ -1069,7 +1069,7 @@ int dynptr_read_into_slot(void *ctx)
/* bpf_dynptr_slice()s are read-only and cannot be written to */
SEC("?tc")
-__failure __msg("R0 cannot write into rdonly_mem")
+__failure __regex("R[0-9]+ cannot write into rdonly_mem")
int skb_invalid_slice_write(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
@@ -1686,3 +1686,27 @@ int test_dynptr_skb_small_buff(struct __sk_buff *skb)
return !!data;
}
+
+__noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr)
+{
+ long ret = 0;
+	/* Keep a side effect in this global function so the compiler does not
+	 * optimize away the call to it.
+ */
+ __sink(ret);
+ return ret;
+}
+
+SEC("?raw_tp")
+__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr")
+int test_dynptr_reg_type(void *ctx)
+{
+ struct task_struct *current = NULL;
+ /* R1 should be holding a PTR_TO_BTF_ID, so this shouldn't be a
+ * reg->type that can be passed to a function accepting a
+ * ARG_PTR_TO_DYNPTR | MEM_RDONLY. process_dynptr_func() should catch
+ * this.
+ */
+ global_call_bpf_dynptr((const struct bpf_dynptr *)current);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
index 8956eb78a226..2011cacdeb18 100644
--- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -5,13 +5,12 @@
char _license[] SEC("license") = "GPL";
-extern const void bpf_fentry_test1 __ksym;
+extern int bpf_fentry_test1(int a) __ksym;
+extern int bpf_modify_return_test(int a, int *b) __ksym;
+
extern const void bpf_fentry_test2 __ksym;
extern const void bpf_fentry_test3 __ksym;
extern const void bpf_fentry_test4 __ksym;
-extern const void bpf_modify_return_test __ksym;
-extern const void bpf_fentry_test6 __ksym;
-extern const void bpf_fentry_test7 __ksym;
extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
diff --git a/tools/testing/selftests/bpf/progs/ip_check_defrag.c b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
index 1c2b6c1616b0..645b2c9f7867 100644
--- a/tools/testing/selftests/bpf/progs/ip_check_defrag.c
+++ b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
@@ -12,7 +12,7 @@
#define IP_OFFSET 0x1FFF
#define NEXTHDR_FRAGMENT 44
-extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags,
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
void *buffer, uint32_t buffer__sz) __ksym;
@@ -42,7 +42,7 @@ static bool is_frag_v6(struct ipv6hdr *ip6h)
return ip6h->nexthdr == NEXTHDR_FRAGMENT;
}
-static int handle_v4(struct sk_buff *skb)
+static int handle_v4(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
u8 iph_buf[20] = {};
@@ -64,7 +64,7 @@ static int handle_v4(struct sk_buff *skb)
return NF_ACCEPT;
}
-static int handle_v6(struct sk_buff *skb)
+static int handle_v6(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
struct ipv6hdr *ip6h;
@@ -89,9 +89,9 @@ static int handle_v6(struct sk_buff *skb)
SEC("netfilter")
int defrag(struct bpf_nf_ctx *ctx)
{
- struct sk_buff *skb = ctx->skb;
+ struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
- switch (bpf_ntohs(skb->protocol)) {
+ switch (bpf_ntohs(ctx->skb->protocol)) {
case ETH_P_IP:
return handle_v4(skb);
case ETH_P_IPV6:
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index fe65e0952a1e..16bdc3e25591 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -7,8 +7,6 @@
#include "bpf_misc.h"
#include "bpf_compiler.h"
-#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
-
static volatile int zero = 0;
int my_pid;
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index cf68d1e48a0f..f502f755f567 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -177,4 +177,41 @@ int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
return actual != expected ? -1 : 0;
}
+struct ctx_val {
+ struct bpf_testmod_ctx __kptr *ctx;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct ctx_val);
+} ctx_map SEC(".maps");
+
+SEC("tc")
+int kfunc_call_ctx(struct __sk_buff *skb)
+{
+ struct bpf_testmod_ctx *ctx;
+ int err = 0;
+
+ ctx = bpf_testmod_ctx_create(&err);
+ if (!ctx && !err)
+ err = -1;
+ if (ctx) {
+ int key = 0;
+ struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key);
+
+ /* Transfer ctx to map to be freed via implicit dtor call
+ * on cleanup.
+ */
+ if (ctx_val)
+ ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx);
+ if (ctx) {
+ bpf_testmod_ctx_release(ctx);
+ err = -1;
+ }
+ }
+ return err;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
index bbba9eb46551..bd8b7fb7061e 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
@@ -4,8 +4,7 @@
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#include "bpf_kfuncs.h"
-
-#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
index d49070803e22..0835b5edf685 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
@@ -25,7 +25,7 @@ int BPF_PROG(trigger)
static int check_cookie(__u64 val, __u64 *result)
{
- long *cookie;
+ __u64 *cookie;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c
index 26205ca80679..421f40835acd 100644
--- a/tools/testing/selftests/bpf/progs/linked_list.c
+++ b/tools/testing/selftests/bpf/progs/linked_list.c
@@ -4,13 +4,26 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_misc.h"
#include "linked_list.h"
+struct head_nested_inner {
+ struct bpf_spin_lock lock;
+ struct bpf_list_head head __contains(foo, node2);
+};
+
+struct head_nested {
+ int dummy;
+ struct head_nested_inner inner;
+};
+
+private(C) struct bpf_spin_lock glock_c;
+private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2);
+private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2);
+
+private(D) struct head_nested ghead_nested;
+
static __always_inline
int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
@@ -310,6 +323,32 @@ int global_list_push_pop(void *ctx)
}
SEC("tc")
+int global_list_push_pop_nested(void *ctx)
+{
+ return test_list_push_pop(&ghead_nested.inner.lock, &ghead_nested.inner.head);
+}
+
+SEC("tc")
+int global_list_array_push_pop(void *ctx)
+{
+ int r;
+
+ r = test_list_push_pop(&glock_c, &ghead_array[0]);
+ if (r)
+ return r;
+
+ r = test_list_push_pop(&glock_c, &ghead_array[1]);
+ if (r)
+ return r;
+
+	/* An array with only one element is a special case, being treated
+ * just like a bpf_list_head variable by the verifier, not an
+ * array.
+ */
+ return test_list_push_pop(&glock_c, &ghead_array_one[0]);
+}
+
+SEC("tc")
int map_list_push_pop_multiple(void *ctx)
{
struct map_value *v;
diff --git a/tools/testing/selftests/bpf/progs/map_percpu_stats.c b/tools/testing/selftests/bpf/progs/map_percpu_stats.c
index 10b2325c1720..63245785eb69 100644
--- a/tools/testing/selftests/bpf/progs/map_percpu_stats.c
+++ b/tools/testing/selftests/bpf/progs/map_percpu_stats.c
@@ -7,7 +7,7 @@
__u32 target_id;
-__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
+__s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym;
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_common.h b/tools/testing/selftests/bpf/progs/nested_trust_common.h
index 83d33931136e..1784b496be2e 100644
--- a/tools/testing/selftests/bpf/progs/nested_trust_common.h
+++ b/tools/testing/selftests/bpf/progs/nested_trust_common.h
@@ -7,6 +7,6 @@
#include <stdbool.h>
bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
-bool bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+__u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
#endif /* _NESTED_TRUST_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
index ea39497f11ed..3568ec450100 100644
--- a/tools/testing/selftests/bpf/progs/nested_trust_failure.c
+++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
@@ -31,14 +31,6 @@ int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_
return 0;
}
-SEC("tp_btf/task_newtask")
-__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc")
-int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags)
-{
- bpf_cpumask_first_zero(&task->cpus_mask);
- return 0;
-}
-
/* Although R2 is of type sk_buff while sock_common is expected, we will hit the untrusted ptr first. */
SEC("tp_btf/tcp_probe")
__failure __msg("R2 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_success.c b/tools/testing/selftests/bpf/progs/nested_trust_success.c
index 833840bffd3b..2b66953ca82e 100644
--- a/tools/testing/selftests/bpf/progs/nested_trust_success.c
+++ b/tools/testing/selftests/bpf/progs/nested_trust_success.c
@@ -32,3 +32,11 @@ int BPF_PROG(test_skb_field, struct sock *sk, struct sk_buff *skb)
bpf_sk_storage_get(&sk_storage_map, skb->sk, 0, 0);
return 0;
}
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_nested_offset, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_first_zero(&task->cpus_mask);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
index c0062645fc68..9e067dcbf607 100644
--- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c
+++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
@@ -5,6 +5,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
#include <errno.h>
@@ -23,10 +24,6 @@ bool skip = false;
#define BADPTR 0
#endif
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 6957d9f2805e..8bd1ebd7d6af 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -9,6 +9,7 @@
#include "err.h"
#include "bpf_experimental.h"
#include "bpf_compiler.h"
+#include "bpf_misc.h"
#ifndef NULL
#define NULL 0
@@ -133,10 +134,6 @@ struct {
__uint(max_entries, 16);
} disallowed_exec_inodes SEC(".maps");
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (int)(sizeof(arr) / sizeof(arr[0]))
-#endif
-
static INLINE bool IS_ERR(const void* ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c
index b09f4fffe57c..a3620c15c136 100644
--- a/tools/testing/selftests/bpf/progs/rbtree.c
+++ b/tools/testing/selftests/bpf/progs/rbtree.c
@@ -13,6 +13,15 @@ struct node_data {
struct bpf_rb_node node;
};
+struct root_nested_inner {
+ struct bpf_spin_lock glock;
+ struct bpf_rb_root root __contains(node_data, node);
+};
+
+struct root_nested {
+ struct root_nested_inner inner;
+};
+
long less_callback_ran = -1;
long removed_key = -1;
long first_data[2] = {-1, -1};
@@ -20,6 +29,9 @@ long first_data[2] = {-1, -1};
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
+private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node);
+private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node);
+private(B) struct root_nested groot_nested;
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
@@ -72,6 +84,12 @@ long rbtree_add_nodes(void *ctx)
}
SEC("tc")
+long rbtree_add_nodes_nested(void *ctx)
+{
+ return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock);
+}
+
+SEC("tc")
long rbtree_add_and_remove(void *ctx)
{
struct bpf_rb_node *res = NULL;
@@ -110,6 +128,65 @@ err_out:
}
SEC("tc")
+long rbtree_add_and_remove_array(void *ctx)
+{
+ struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL;
+ struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}};
+ struct node_data *n;
+ long k1 = -1, k2 = -1, k3 = -1;
+ int i, j;
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 2; j++) {
+ nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j]));
+ if (!nodes[i][j])
+ goto err_out;
+ nodes[i][j]->key = i * 2 + j;
+ }
+ }
+
+ bpf_spin_lock(&glock);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);
+ for (j = 0; j < 2; j++)
+ bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);
+ res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node);
+ res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node);
+ res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node);
+ bpf_spin_unlock(&glock);
+
+ if (res1) {
+ n = container_of(res1, struct node_data, node);
+ k1 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (res2) {
+ n = container_of(res2, struct node_data, node);
+ k2 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (res3) {
+ n = container_of(res3, struct node_data, node);
+ k3 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (k1 != 0 || k2 != 2 || k3 != 4)
+ return 2;
+
+ return 0;
+
+err_out:
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 2; j++) {
+ if (nodes[i][j])
+ bpf_obj_drop(nodes[i][j]);
+ }
+ }
+ return 1;
+}
+
+SEC("tc")
long rbtree_first_and_remove(void *ctx)
{
struct bpf_rb_node *res = NULL;
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
index 3fecf1c6dfe5..b722a1e1ddef 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_fail.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=3 alloc_insn=10")
+__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+")
long rbtree_api_remove_no_drop(void *ctx)
{
struct bpf_rb_node *res;
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index 1553b9c16aa7..f8d4b7cfcd68 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=4 alloc_insn=21")
+__failure __regex("Unreleased reference id=4 alloc_insn=[0-9]+")
long rbtree_refcounted_node_ref_escapes(void *ctx)
{
struct node_acquire *n, *m;
@@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=3 alloc_insn=9")
+__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+")
long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
{
struct node_acquire *n, *m;
diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c
index 7a438600ae98..60518aed1ffc 100644
--- a/tools/testing/selftests/bpf/progs/setget_sockopt.c
+++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c
@@ -6,10 +6,7 @@
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_misc.h"
extern unsigned long CONFIG_HZ __kconfig;
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
index db4abd2682fc..3bb4451524a1 100644
--- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -33,6 +33,8 @@ int main_prog(struct __sk_buff *skb)
struct iphdr *ip = NULL;
struct tcphdr *tcp;
__u8 proto = 0;
+ int urg_ptr;
+ u32 offset;
if (!(ip = get_iphdr(skb)))
goto out;
@@ -48,7 +50,14 @@ int main_prog(struct __sk_buff *skb)
if (!tcp)
goto out;
- return tcp->urg_ptr;
+ urg_ptr = tcp->urg_ptr;
+
+ /* Checksum validation part */
+ proto++;
+ offset = sizeof(struct ethhdr) + offsetof(struct iphdr, protocol);
+ bpf_skb_store_bytes(skb, offset, &proto, sizeof(proto), BPF_F_RECOMPUTE_CSUM);
+
+ return urg_ptr;
out:
return -1;
}
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
new file mode 100644
index 000000000000..56b787a89876
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_do_detach;
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
index 3494ca30fa7f..4a4e0b8d9b72 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -7,10 +7,6 @@
#include "bpf_experimental.h"
#include "bpf_misc.h"
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
struct generic_map_value {
void *data;
};
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
index 77ad8adf68da..f7b330ddd007 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
@@ -9,10 +10,14 @@
#define EINVAL 22
#define ENOENT 2
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
extern unsigned long CONFIG_HZ __kconfig;
int test_einval_bpf_tuple = 0;
int test_einval_reserved = 0;
+int test_einval_reserved_new = 0;
int test_einval_netns_id = 0;
int test_einval_len_opts = 0;
int test_eproto_l4proto = 0;
@@ -22,6 +27,11 @@ int test_eafnosupport = 0;
int test_alloc_entry = -EINVAL;
int test_insert_entry = -EAFNOSUPPORT;
int test_succ_lookup = -ENOENT;
+int test_ct_zone_id_alloc_entry = -EINVAL;
+int test_ct_zone_id_insert_entry = -EAFNOSUPPORT;
+int test_ct_zone_id_succ_lookup = -ENOENT;
+int test_ct_zone_dir_enoent_lookup = 0;
+int test_ct_zone_id_enoent_lookup = 0;
u32 test_delta_timeout = 0;
u32 test_status = 0;
u32 test_insert_lookup_mark = 0;
@@ -45,6 +55,17 @@ struct bpf_ct_opts___local {
s32 netns_id;
s32 error;
u8 l4proto;
+ u8 dir;
+ u8 reserved[2];
+};
+
+struct bpf_ct_opts___new {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 dir;
+ u16 ct_zone_id;
+ u8 ct_zone_dir;
u8 reserved[3];
} __attribute__((preserve_access_index));
@@ -220,10 +241,97 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
}
}
+static __always_inline void
+nf_ct_opts_new_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___new *, u32),
+ struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___new *, u32),
+ void *ctx)
+{
+ struct bpf_ct_opts___new opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
+ struct bpf_sock_tuple bpf_tuple;
+ struct nf_conn *ct;
+
+ __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
+
+ opts_def.reserved[0] = 1;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.reserved[0] = 0;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_reserved_new = opts_def.error;
+
+ bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+ bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+ bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
+ bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
+
+ /* use non-default ct zone */
+ opts_def.ct_zone_id = 10;
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG;
+ ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ __u16 sport = bpf_get_prandom_u32();
+ __u16 dport = bpf_get_prandom_u32();
+ union nf_inet_addr saddr = {};
+ union nf_inet_addr daddr = {};
+ struct nf_conn *ct_ins;
+
+ bpf_ct_set_timeout(ct, 10000);
+
+ /* snat */
+ saddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
+ /* dnat */
+ daddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
+
+ ct_ins = bpf_ct_insert_entry(ct);
+ if (ct_ins) {
+ struct nf_conn *ct_lk;
+
+ /* entry should exist in same ct zone we inserted it */
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk) {
+ bpf_ct_release(ct_lk);
+ test_ct_zone_id_succ_lookup = 0;
+ }
+
+ /* entry should not exist with wrong direction */
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_REPL;
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG;
+ if (ct_lk)
+ bpf_ct_release(ct_lk);
+ else
+ test_ct_zone_dir_enoent_lookup = opts_def.error;
+
+ /* entry should not exist in default ct zone */
+ opts_def.ct_zone_id = 0;
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk)
+ bpf_ct_release(ct_lk);
+ else
+ test_ct_zone_id_enoent_lookup = opts_def.error;
+
+ bpf_ct_release(ct_ins);
+ test_ct_zone_id_insert_entry = 0;
+ }
+ test_ct_zone_id_alloc_entry = 0;
+ }
+}
+
SEC("xdp")
int nf_xdp_ct_test(struct xdp_md *ctx)
{
nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
+ nf_ct_opts_new_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
return 0;
}
@@ -231,6 +339,7 @@ SEC("tc")
int nf_skb_ct_test(struct __sk_buff *ctx)
{
nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
+ nf_ct_opts_new_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
return 0;
}
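
The extended options struct exercised above adds ct_zone_id and ct_zone_dir so lookups and allocations can target a non-default conntrack zone. A minimal sketch of a zone-aware lookup (tuple setup omitted; bpf_ct_opts___new is the CO-RE-relocated local definition from this test):

/* Sketch only: look up a TCP flow in conntrack zone 10, original direction. */
struct bpf_ct_opts___new opts = {
	.netns_id    = -1,
	.l4proto     = IPPROTO_TCP,
	.ct_zone_id  = 10,
	.ct_zone_dir = NF_CT_ZONE_DIR_ORIG,
};
struct nf_conn *ct;

ct = bpf_skb_ct_lookup(skb, &tuple, sizeof(tuple.ipv4), (void *)&opts, sizeof(opts));
if (ct)
	bpf_ct_release(ct);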
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
index 0e4759ab38ff..a586f087ffeb 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
index 2dde8e3fe4c9..e68667aec6a6 100644
--- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
@@ -45,7 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size)
}
SEC("?lsm.s/bpf")
-__failure __msg("arg#0 expected pointer to stack or dynptr_ptr")
+__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr")
int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size)
{
unsigned long val = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
new file mode 100644
index 000000000000..7ac7e1de34d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_dynptr_nullable_test1(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(&data, NULL);
+
+ return 0;
+}
+
+SEC("tc")
+int kfunc_dynptr_nullable_test2(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(&data, &data);
+
+ return 0;
+}
+
+SEC("tc")
+__failure __msg("expected pointer to stack or const struct bpf_dynptr")
+int kfunc_dynptr_nullable_test3(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(NULL, &data);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
index 99d2ea9fb658..f48f85f1bd70 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
@@ -92,7 +92,7 @@ struct {
__uint(value_size, sizeof(int));
} tls_sock_map SEC(".maps");
-SEC("sk_skb1")
+SEC("sk_skb/stream_parser")
int bpf_prog1(struct __sk_buff *skb)
{
int *f, two = 2;
@@ -104,7 +104,7 @@ int bpf_prog1(struct __sk_buff *skb)
return skb->len;
}
-SEC("sk_skb2")
+SEC("sk_skb/stream_verdict")
int bpf_prog2(struct __sk_buff *skb)
{
__u32 lport = skb->local_port;
@@ -151,7 +151,7 @@ static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
memcpy(c + offset, "PASS", 4);
}
-SEC("sk_skb3")
+SEC("sk_skb/stream_verdict")
int bpf_prog3(struct __sk_buff *skb)
{
int err, *f, ret = SK_PASS;
@@ -177,9 +177,6 @@ int bpf_prog3(struct __sk_buff *skb)
return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
}
- f = bpf_map_lookup_elem(&sock_skb_opts, &one);
- if (f && *f)
- ret = SK_DROP;
err = bpf_skb_adjust_room(skb, 4, 0, 0);
if (err)
return SK_DROP;
@@ -233,7 +230,7 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
return 0;
}
-SEC("sk_msg1")
+SEC("sk_msg")
int bpf_prog4(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
@@ -263,7 +260,7 @@ int bpf_prog4(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg2")
+SEC("sk_msg")
int bpf_prog6(struct sk_msg_md *msg)
{
int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
@@ -308,7 +305,7 @@ int bpf_prog6(struct sk_msg_md *msg)
#endif
}
-SEC("sk_msg3")
+SEC("sk_msg")
int bpf_prog8(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
@@ -329,7 +326,8 @@ int bpf_prog8(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg4")
+
+SEC("sk_msg")
int bpf_prog9(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
@@ -347,7 +345,7 @@ int bpf_prog9(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg5")
+SEC("sk_msg")
int bpf_prog10(struct sk_msg_md *msg)
{
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 7f74077d6622..548660e299a5 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -10,10 +10,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_compiler.h"
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_misc.h"
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
index 68a75436e8af..81249d119a8b 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -10,10 +10,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_compiler.h"
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_misc.h"
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index efc3c61f7852..bbdd08764789 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -10,6 +10,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_compiler.h"
+#include "bpf_misc.h"
/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
#define MAX_ULONG_STR_LEN 0xF
@@ -17,10 +18,6 @@
/* Max supported length of sysctl value string (pow2). */
#define MAX_VALUE_STR_LEN 0x40
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
const char tcp_mem_name[] = "net/ipv4/tcp_mem";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
index 74ec09f040b7..ca8e8734d901 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
@@ -222,17 +222,21 @@ int egress_host(struct __sk_buff *skb)
return TC_ACT_OK;
if (skb_proto(skb_type) == IPPROTO_TCP) {
- if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
inc_errs(EGRESS_ENDHOST);
- } else {
- if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC &&
+ } else if (skb_proto(skb_type) == IPPROTO_UDP) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_TAI &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
inc_errs(EGRESS_ENDHOST);
+ } else {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_REALTIME &&
+ skb->tstamp)
+ inc_errs(EGRESS_ENDHOST);
}
skb->tstamp = EGRESS_ENDHOST_MAGIC;
@@ -252,7 +256,7 @@ int ingress_host(struct __sk_buff *skb)
if (!skb_type)
return TC_ACT_OK;
- if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC &&
skb->tstamp == EGRESS_FWDNS_MAGIC)
inc_dtimes(INGRESS_ENDHOST);
else
@@ -315,7 +319,6 @@ int egress_fwdns_prio100(struct __sk_buff *skb)
SEC("tc")
int ingress_fwdns_prio101(struct __sk_buff *skb)
{
- __u64 expected_dtime = EGRESS_ENDHOST_MAGIC;
int skb_type;
skb_type = skb_get_type(skb);
@@ -323,29 +326,24 @@ int ingress_fwdns_prio101(struct __sk_buff *skb)
/* Should have handled in prio100 */
return TC_ACT_SHOT;
- if (skb_proto(skb_type) == IPPROTO_UDP)
- expected_dtime = 0;
-
if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
- skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
- skb->tstamp != expected_dtime)
+ (skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC &&
+ skb->tstamp_type != BPF_SKB_CLOCK_TAI) ||
+ skb->tstamp != EGRESS_ENDHOST_MAGIC)
inc_errs(INGRESS_FWDNS_P101);
else
inc_dtimes(INGRESS_FWDNS_P101);
} else {
- if (!fwdns_clear_dtime() && expected_dtime)
+ if (!fwdns_clear_dtime())
inc_errs(INGRESS_FWDNS_P101);
}
- if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) {
skb->tstamp = INGRESS_FWDNS_MAGIC;
} else {
if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
- BPF_SKB_TSTAMP_DELIVERY_MONO))
- inc_errs(SET_DTIME);
- if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
- BPF_SKB_TSTAMP_UNSPEC))
+ BPF_SKB_CLOCK_MONOTONIC))
inc_errs(SET_DTIME);
}
@@ -370,7 +368,7 @@ int egress_fwdns_prio101(struct __sk_buff *skb)
if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
- skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
+ skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC ||
skb->tstamp != INGRESS_FWDNS_MAGIC)
inc_errs(EGRESS_FWDNS_P101);
else
@@ -380,14 +378,11 @@ int egress_fwdns_prio101(struct __sk_buff *skb)
inc_errs(EGRESS_FWDNS_P101);
}
- if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) {
skb->tstamp = EGRESS_FWDNS_MAGIC;
} else {
if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC,
- BPF_SKB_TSTAMP_DELIVERY_MONO))
- inc_errs(SET_DTIME);
- if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
- BPF_SKB_TSTAMP_UNSPEC))
+ BPF_SKB_CLOCK_MONOTONIC))
inc_errs(SET_DTIME);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
index c8e4553648bf..44ee0d037f95 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
@@ -9,6 +9,7 @@
#include "bpf_kfuncs.h"
#include "test_siphash.h"
#include "test_tcp_custom_syncookie.h"
+#include "bpf_misc.h"
#define MAX_PACKET_OFF 0xffff
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
index 29a6a53cf229..f8b1b7e68d2e 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
@@ -7,8 +7,6 @@
#define __packed __attribute__((__packed__))
#define __force
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
#define swap(a, b) \
do { \
typeof(a) __tmp = (a); \
diff --git a/tools/testing/selftests/bpf/progs/timer_lockup.c b/tools/testing/selftests/bpf/progs/timer_lockup.c
new file mode 100644
index 000000000000..3e520133281e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_lockup.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer1_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer2_map SEC(".maps");
+
+int timer1_err;
+int timer2_err;
+
+static int timer_cb1(void *map, int *k, struct elem *v)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer2_map, &key);
+ if (timer)
+ timer2_err = bpf_timer_cancel(timer);
+
+ return 0;
+}
+
+static int timer_cb2(void *map, int *k, struct elem *v)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer1_map, &key);
+ if (timer)
+ timer1_err = bpf_timer_cancel(timer);
+
+ return 0;
+}
+
+SEC("tc")
+int timer1_prog(void *ctx)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer1_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer1_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb1);
+ bpf_timer_start(timer, 1, BPF_F_TIMER_CPU_PIN);
+ }
+
+ return 0;
+}
+
+SEC("tc")
+int timer2_prog(void *ctx)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&timer2_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer2_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb2);
+ bpf_timer_start(timer, 1, BPF_F_TIMER_CPU_PIN);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c
index 515daef3c84b..c435a3a8328a 100644
--- a/tools/testing/selftests/bpf/progs/tracing_struct.c
+++ b/tools/testing/selftests/bpf/progs/tracing_struct.c
@@ -18,11 +18,6 @@ struct bpf_testmod_struct_arg_3 {
int b[];
};
-struct bpf_testmod_struct_arg_4 {
- u64 a;
- int b;
-};
-
long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs;
__u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3;
long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret;
@@ -30,9 +25,6 @@ long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret;
long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret;
long t5_ret;
int t6;
-long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret;
-long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret;
-
SEC("fentry/bpf_testmod_test_struct_arg_1")
int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c)
@@ -138,50 +130,4 @@ int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a)
return 0;
}
-SEC("fentry/bpf_testmod_test_struct_arg_7")
-int BPF_PROG2(test_struct_arg_12, __u64, a, void *, b, short, c, int, d,
- void *, e, struct bpf_testmod_struct_arg_4, f)
-{
- t7_a = a;
- t7_b = (long)b;
- t7_c = c;
- t7_d = d;
- t7_e = (long)e;
- t7_f_a = f.a;
- t7_f_b = f.b;
- return 0;
-}
-
-SEC("fexit/bpf_testmod_test_struct_arg_7")
-int BPF_PROG2(test_struct_arg_13, __u64, a, void *, b, short, c, int, d,
- void *, e, struct bpf_testmod_struct_arg_4, f, int, ret)
-{
- t7_ret = ret;
- return 0;
-}
-
-SEC("fentry/bpf_testmod_test_struct_arg_8")
-int BPF_PROG2(test_struct_arg_14, __u64, a, void *, b, short, c, int, d,
- void *, e, struct bpf_testmod_struct_arg_4, f, int, g)
-{
- t8_a = a;
- t8_b = (long)b;
- t8_c = c;
- t8_d = d;
- t8_e = (long)e;
- t8_f_a = f.a;
- t8_f_b = f.b;
- t8_g = g;
- return 0;
-}
-
-SEC("fexit/bpf_testmod_test_struct_arg_8")
-int BPF_PROG2(test_struct_arg_15, __u64, a, void *, b, short, c, int, d,
- void *, e, struct bpf_testmod_struct_arg_4, f, int, g,
- int, ret)
-{
- t8_ret = ret;
- return 0;
-}
-
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c
new file mode 100644
index 000000000000..4742012ace06
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct bpf_testmod_struct_arg_4 {
+ u64 a;
+ int b;
+};
+
+struct bpf_testmod_struct_arg_5 {
+ char a;
+ short b;
+ int c;
+ long d;
+};
+
+long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret;
+long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret;
+long t9_a, t9_b, t9_c, t9_d, t9_e, t9_f, t9_g, t9_h_a, t9_h_b, t9_h_c, t9_h_d, t9_i, t9_ret;
+
+SEC("fentry/bpf_testmod_test_struct_arg_7")
+int BPF_PROG2(test_struct_many_args_1, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f)
+{
+ t7_a = a;
+ t7_b = (long)b;
+ t7_c = c;
+ t7_d = d;
+ t7_e = (long)e;
+ t7_f_a = f.a;
+ t7_f_b = f.b;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_7")
+int BPF_PROG2(test_struct_many_args_2, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, ret)
+{
+ t7_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_8")
+int BPF_PROG2(test_struct_many_args_3, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, g)
+{
+ t8_a = a;
+ t8_b = (long)b;
+ t8_c = c;
+ t8_d = d;
+ t8_e = (long)e;
+ t8_f_a = f.a;
+ t8_f_b = f.b;
+ t8_g = g;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_8")
+int BPF_PROG2(test_struct_many_args_4, __u64, a, void *, b, short, c, int, d,
+ void *, e, struct bpf_testmod_struct_arg_4, f, int, g,
+ int, ret)
+{
+ t8_ret = ret;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_struct_arg_9")
+int BPF_PROG2(test_struct_many_args_5, __u64, a, void *, b, short, c, int, d, void *, e,
+ char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i)
+{
+ t9_a = a;
+ t9_b = (long)b;
+ t9_c = c;
+ t9_d = d;
+ t9_e = (long)e;
+ t9_f = f;
+ t9_g = g;
+ t9_h_a = h.a;
+ t9_h_b = h.b;
+ t9_h_c = h.c;
+ t9_h_d = h.d;
+ t9_i = i;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_struct_arg_9")
+int BPF_PROG2(test_struct_many_args_6, __u64, a, void *, b, short, c, int, d, void *, e,
+ char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i, int, ret)
+{
+ t9_ret = ret;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall.c b/tools/testing/selftests/bpf/progs/uprobe_syscall.c
new file mode 100644
index 000000000000..8a4fa6c7ef59
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_syscall.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <string.h>
+
+struct pt_regs regs;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("uretprobe//proc/self/exe:uretprobe_regs_trigger")
+int uretprobe(struct pt_regs *ctx)
+{
+ __builtin_memcpy(&regs, ctx, sizeof(regs));
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
new file mode 100644
index 000000000000..0d7f1a7db2e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <string.h>
+
+struct pt_regs regs;
+
+char _license[] SEC("license") = "GPL";
+
+int executed = 0;
+
+SEC("uretprobe.multi")
+int test(struct pt_regs *regs)
+{
+ executed = 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uretprobe_stack.c b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
new file mode 100644
index 000000000000..9fdcf396b8f4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 entry_stack1[32], exit_stack1[32];
+__u64 entry_stack1_recur[32], exit_stack1_recur[32];
+__u64 entry_stack2[32];
+__u64 entry_stack3[32];
+__u64 entry_stack4[32], exit_stack4[32];
+__u64 usdt_stack[32];
+
+int entry1_len, exit1_len;
+int entry1_recur_len, exit1_recur_len;
+int entry2_len, exit2_len;
+int entry3_len, exit3_len;
+int entry4_len, exit4_len;
+int usdt_len;
+
+#define SZ sizeof(usdt_stack)
+
+SEC("uprobe//proc/self/exe:target_1")
+int BPF_UPROBE(uprobe_1)
+{
+	/* target_1 is recursive with a depth of 2, so we capture two separate
+	 * stack traces, depending on which occurrence it is
+ */
+ static bool recur = false;
+
+ if (!recur)
+ entry1_len = bpf_get_stack(ctx, &entry_stack1, SZ, BPF_F_USER_STACK);
+ else
+ entry1_recur_len = bpf_get_stack(ctx, &entry_stack1_recur, SZ, BPF_F_USER_STACK);
+
+ recur = true;
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:target_1")
+int BPF_URETPROBE(uretprobe_1)
+{
+ /* see above, target_1 is recursive */
+ static bool recur = false;
+
+ /* NOTE: order of returns is reversed to order of entries */
+ if (!recur)
+ exit1_recur_len = bpf_get_stack(ctx, &exit_stack1_recur, SZ, BPF_F_USER_STACK);
+ else
+ exit1_len = bpf_get_stack(ctx, &exit_stack1, SZ, BPF_F_USER_STACK);
+
+ recur = true;
+ return 0;
+}
+
+SEC("uprobe//proc/self/exe:target_2")
+int BPF_UPROBE(uprobe_2)
+{
+ entry2_len = bpf_get_stack(ctx, &entry_stack2, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+/* no uretprobe for target_2 */
+
+SEC("uprobe//proc/self/exe:target_3")
+int BPF_UPROBE(uprobe_3)
+{
+ entry3_len = bpf_get_stack(ctx, &entry_stack3, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+/* no uretprobe for target_3 */
+
+SEC("uprobe//proc/self/exe:target_4")
+int BPF_UPROBE(uprobe_4)
+{
+ entry4_len = bpf_get_stack(ctx, &entry_stack4, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:target_4")
+int BPF_URETPROBE(uretprobe_4)
+{
+ exit4_len = bpf_get_stack(ctx, &exit_stack4, SZ, BPF_F_USER_STACK);
+ return 0;
+}
+
+SEC("usdt//proc/self/exe:uretprobe_stack:target")
+int BPF_USDT(usdt_probe)
+{
+ usdt_len = bpf_get_stack(ctx, &usdt_stack, SZ, BPF_F_USER_STACK);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
index 11ab25c42c36..54de0389f878 100644
--- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
+++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c
@@ -221,3 +221,25 @@ int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx)
bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0);
return 0;
}
+
+__noinline long global_call_bpf_dynptr_data(struct bpf_dynptr *dynptr)
+{
+ bpf_dynptr_data(dynptr, 0xA, 0xA);
+ return 0;
+}
+
+static long callback_adjust_bpf_dynptr_reg_off(struct bpf_dynptr *dynptr,
+ void *ctx)
+{
+ global_call_bpf_dynptr_data(dynptr += 1024);
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("dereference of modified dynptr_ptr ptr R1 off=16384 disallowed")
+int user_ringbuf_callback_const_ptr_to_dynptr_reg_off(void *ctx)
+{
+ bpf_user_ringbuf_drain(&user_ringbuf,
+ callback_adjust_bpf_dynptr_reg_off, NULL, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
index 93144ae6df74..67509c5d3982 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
index ef66ea460264..6065f862d964 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
new file mode 100644
index 000000000000..716113c2bce2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign,
+ u32 nr_bits) __ksym __weak;
+int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
+void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;
+
+SEC("iter.s/cgroup")
+__description("bits iter without destroy")
+__failure __msg("Unreleased reference")
+int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits it;
+ u64 data = 1;
+
+ bpf_iter_bits_new(&it, &data, 1);
+ bpf_iter_bits_next(&it);
+ return 0;
+}
+
+SEC("iter/cgroup")
+__description("uninitialized iter in ->next()")
+__failure __msg("expected an initialized iter_bits as arg #1")
+int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits *it = NULL;
+
+ bpf_iter_bits_next(it);
+ return 0;
+}
+
+SEC("iter/cgroup")
+__description("uninitialized iter in ->destroy()")
+__failure __msg("expected an initialized iter_bits as arg #1")
+int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct bpf_iter_bits it = {};
+
+ bpf_iter_bits_destroy(&it);
+ return 0;
+}
+
+SEC("syscall")
+__description("null pointer")
+__success __retval(0)
+int null_pointer(void)
+{
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, NULL, 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bits copy")
+__success __retval(10)
+int bits_copy(void)
+{
+	u64 data = 0xf7310UL; /* nibbles have 4 + 3 + 2 + 1 + 0 = 10 bits set */
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data, 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bits memalloc")
+__success __retval(64)
+int bits_memalloc(void)
+{
+ u64 data[2];
+ int nr = 0;
+ int *bit;
+
+ __builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */
+ bpf_for_each(bits, bit, &data[0], sizeof(data) / sizeof(u64))
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("bit index")
+__success __retval(8)
+int bit_index(void)
+{
+ u64 data = 0x100;
+ int bit_idx = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data, 1) {
+ if (*bit == 0)
+ continue;
+ bit_idx = *bit;
+ }
+ return bit_idx;
+}
+
+SEC("syscall")
+__description("bits nomem")
+__success __retval(0)
+int bits_nomem(void)
+{
+ u64 data[4];
+ int nr = 0;
+ int *bit;
+
+ __builtin_memset(&data, 0xff, sizeof(data));
+	bpf_for_each(bits, bit, &data[0], 513) /* 513 words: greater than 512 to hit the nomem path */
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("fewer words")
+__success __retval(1)
+int fewer_words(void)
+{
+ u64 data[2] = {0x1, 0xff};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 1)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("zero words")
+__success __retval(0)
+int zero_words(void)
+{
+ u64 data[2] = {0x1, 0xff};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 0)
+ nr++;
+ return nr;
+}
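
The positive tests above use the bpf_for_each() convenience macro; conceptually it expands to the usual open-coded iterator triple built from the three kfuncs declared at the top of the file. A rough sketch (not the exact macro expansion):

/* Rough equivalent of bpf_for_each(bits, bit, &data, 1) */
struct bpf_iter_bits it;
int *bit;

bpf_iter_bits_new(&it, &data, 1);	/* 1 = number of u64 words to scan */
while ((bit = bpf_iter_bits_next(&it))) {
	/* *bit is the index of the next set bit */
}
bpf_iter_bits_destroy(&it);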
diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
index 80c737b6d340..e54bb5385bc1 100644
--- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
+++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
@@ -551,4 +551,240 @@ int cond_break5(const void *ctx)
return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
}
+#define ARR2_SZ 1000
+SEC(".data.arr2")
+char arr2[ARR2_SZ];
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ __u64 i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < ARR2_SZ)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter_signed(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ long i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < ARR2_SZ && i >= 0)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+volatile const int limit = ARR2_SZ;
+
+SEC("socket")
+__success __flag(BPF_F_TEST_STATE_FREQ)
+int loop_inside_iter_volatile_limit(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+ __u64 i = 0;
+
+ bpf_iter_num_new(&it, 0, ARR2_SZ);
+ while ((v = bpf_iter_num_next(&it))) {
+ if (i < limit)
+ sum += arr2[i++];
+ }
+ bpf_iter_num_destroy(&it);
+ return sum;
+}
+
+#define ARR_LONG_SZ 1000
+
+SEC(".data.arr_long")
+long arr_long[ARR_LONG_SZ];
+
+SEC("socket")
+__success
+int test1(const void *ctx)
+{
+ long i;
+
+ for (i = 0; i < ARR_LONG_SZ && can_loop; i++)
+ arr_long[i] = i;
+ return 0;
+}
+
+SEC("socket")
+__success
+int test2(const void *ctx)
+{
+ __u64 i;
+
+ for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
+ barrier_var(i);
+ arr_long[i] = i;
+ }
+ return 0;
+}
+
+SEC(".data.arr_foo")
+struct {
+ int a;
+ int b;
+} arr_foo[ARR_LONG_SZ];
+
+SEC("socket")
+__success
+int test3(const void *ctx)
+{
+ __u64 i;
+
+ for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
+ barrier_var(i);
+ arr_foo[i].a = i;
+ arr_foo[i].b = i;
+ }
+ return 0;
+}
+
+SEC("socket")
+__success
+int test4(const void *ctx)
+{
+ long i;
+
+ for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) {
+ barrier_var(i);
+ arr_foo[i].a = i;
+ arr_foo[i].b = i;
+ }
+ return 0;
+}
+
+char buf[10] SEC(".data.buf");
+
+SEC("socket")
+__description("check add const")
+__success
+__naked void check_add_const(void)
+{
+ /* typical LLVM generated loop with may_goto */
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 > 9 goto l1_%=; \
+l0_%=: r1 = %[buf]; \
+ r2 = r0; \
+ r1 += r2; \
+ r3 = *(u8 *)(r1 +0); \
+ .byte 0xe5; /* may_goto */ \
+ .byte 0; /* regs */ \
+ .short 4; /* off of l1_%=: */ \
+ .long 0; /* imm */ \
+ r0 = r2; \
+ r0 += 1; \
+ if r2 < 9 goto l0_%=; \
+ exit; \
+l1_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__msg("*(u8 *)(r7 +0) = r0")
+__msg("invalid access to map value, value_size=10 off=10 size=1")
+__naked void check_add_const_3regs(void)
+{
+ asm volatile (
+ "r6 = %[buf];"
+ "r7 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "r1 = r0;" /* link r0.id == r1.id == r2.id */
+ "r2 = r0;"
+ "r1 += 1;" /* r1 == r0+1 */
+ "r2 += 2;" /* r2 == r0+2 */
+ "if r0 > 8 goto 1f;" /* r0 range [0, 8] */
+ "r6 += r1;" /* r1 range [1, 9] */
+ "r7 += r2;" /* r2 range [2, 10] */
+ "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
+ "*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */
+ "1: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__msg("*(u8 *)(r8 -1) = r0")
+__msg("invalid access to map value, value_size=10 off=10 size=1")
+__naked void check_add_const_3regs_2if(void)
+{
+ asm volatile (
+ "r6 = %[buf];"
+ "r7 = %[buf];"
+ "r8 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "if r0 < 2 goto 1f;"
+ "r1 = r0;" /* link r0.id == r1.id == r2.id */
+ "r2 = r0;"
+ "r1 += 1;" /* r1 == r0+1 */
+ "r2 += 2;" /* r2 == r0+2 */
+ "if r2 > 11 goto 1f;" /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */
+ "if r0 s< 0 goto 1f;" /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */
+ "r6 += r0;" /* r0 range [0, 9] */
+ "r7 += r1;" /* r1 range [1, 10] */
+ "r8 += r2;" /* r2 range [2, 11] */
+ "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
+ "*(u8 *)(r7 -1) = r0;" /* safe */
+ "*(u8 *)(r8 -1) = r0;" /* unsafe */
+ "1: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
+SEC("socket")
+__failure
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_add_const_regsafe_off(void)
+{
+ asm volatile (
+ "r8 = %[buf];"
+ "call %[bpf_ktime_get_ns];"
+ "r6 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r7 = r0;"
+ "call %[bpf_ktime_get_ns];"
+ "r1 = r0;" /* same ids for r1 and r0 */
+ "if r6 > r7 goto 1f;" /* this jump can't be predicted */
+ "r1 += 1;" /* r1.off == +1 */
+ "goto 2f;"
+ "1: r1 += 100;" /* r1.off == +100 */
+ "goto +0;" /* verify r1.off in regsafe() after this insn */
+ "2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/
+ "r8 += r1;"
+ "*(u8 *)(r8 +0) = r0;" /* potentially unsafe, buf size is 10 */
+ "3: exit;"
+ :
+ : __imm(bpf_ktime_get_ns),
+ __imm_ptr(buf)
+ : __clobber_common);
+}
+
char _license[] SEC("license") = "GPL";
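The hand-written check_add_const() above emits a raw may_goto instruction (the .byte 0xe5 sequence) to reproduce the bounded-loop shape LLVM generates. A hedged C sketch of roughly the same loop, using the can_loop helper that the surrounding tests already rely on and the 10-byte buf[] declared above:

/* Sketch only: the same bounded read loop expressed in C. */
SEC("socket")
__success
int check_add_const_c_sketch(void *ctx)
{
        __u64 i = bpf_ktime_get_ns();
        char c = 0;

        if (i > 9)
                return 0;
        while (i < 9 && can_loop) {     /* may_goto keeps the loop verifiable */
                c = buf[i];             /* i is provably within [0, 8] here */
                i++;
        }
        return c;
}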
diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
index 65bba330e7e5..ab9f9f2620ed 100644
--- a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
@@ -79,7 +79,7 @@ int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx)
return NF_ACCEPT;
}
-extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags,
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
void *buffer, uint32_t buffer__sz) __ksym;
@@ -90,8 +90,8 @@ __success __failure_unpriv
__retval(0)
int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx)
{
+ struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
const struct nf_hook_state *state = ctx->state;
- struct sk_buff *skb = ctx->skb;
const struct iphdr *iph;
const struct tcphdr *th;
u8 buffer_iph[20] = {};
@@ -99,7 +99,7 @@ int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx)
struct bpf_dynptr ptr;
uint8_t ihl;
- if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr))
+ if (ctx->skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr))
return NF_ACCEPT;
iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph));
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
index 4a58e0398e72..6a6fad625f7e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -8,8 +8,6 @@
#include "bpf_misc.h"
#include <../../../tools/include/linux/filter.h>
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
-
int vals[] SEC(".data.vals") = {1, 2, 3, 4};
__naked __noinline __used
diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c
index 49e712acbf60..f8d3ae0c29ae 100644
--- a/tools/testing/selftests/bpf/progs/wq.c
+++ b/tools/testing/selftests/bpf/progs/wq.c
@@ -32,6 +32,7 @@ struct {
} hmap_malloc SEC(".maps");
struct elem {
+ int ok_offset;
struct bpf_wq w;
};
@@ -53,7 +54,7 @@ __u32 ok;
__u32 ok_sleepable;
static int test_elem_callback(void *map, int *key,
- int (callback_fn)(void *map, int *key, struct bpf_wq *wq))
+ int (callback_fn)(void *map, int *key, void *value))
{
struct elem init = {}, *val;
struct bpf_wq *wq;
@@ -70,6 +71,8 @@ static int test_elem_callback(void *map, int *key,
if (!val)
return -2;
+ val->ok_offset = *key;
+
wq = &val->w;
if (bpf_wq_init(wq, map, 0) != 0)
return -3;
@@ -84,7 +87,7 @@ static int test_elem_callback(void *map, int *key,
}
static int test_hmap_elem_callback(void *map, int *key,
- int (callback_fn)(void *map, int *key, struct bpf_wq *wq))
+ int (callback_fn)(void *map, int *key, void *value))
{
struct hmap_elem init = {}, *val;
struct bpf_wq *wq;
@@ -114,7 +117,7 @@ static int test_hmap_elem_callback(void *map, int *key,
}
/* callback for non sleepable workqueue */
-static int wq_callback(void *map, int *key, struct bpf_wq *work)
+static int wq_callback(void *map, int *key, void *value)
{
bpf_kfunc_common_test();
ok |= (1 << *key);
@@ -122,10 +125,16 @@ static int wq_callback(void *map, int *key, struct bpf_wq *work)
}
/* callback for sleepable workqueue */
-static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
+static int wq_cb_sleepable(void *map, int *key, void *value)
{
+ struct elem *data = (struct elem *)value;
+ int offset = data->ok_offset;
+
+ if (*key != offset)
+ return 0;
+
bpf_kfunc_call_test_sleepable();
- ok_sleepable |= (1 << *key);
+ ok_sleepable |= (1 << offset);
return 0;
}
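With the new signature the workqueue callback receives the whole map value rather than a pointer to the embedded struct bpf_wq, which is what allows wq_cb_sleepable() above to read ok_offset. A condensed, hedged sketch of the full bpf_wq flow under this convention, reusing struct elem from this file (helper and kfunc wrapper names as used elsewhere in these selftests, everything else illustrative):

/* Sketch: init, arm and run a bpf_wq whose callback sees the map value. */
static int demo_wq_cb(void *map, int *key, void *value)
{
        struct elem *e = value;         /* full value, ok_offset included */

        if (e->ok_offset != *key)
                return 0;
        /* ... do the deferred work ... */
        return 0;
}

static int demo_schedule(void *map, int *key)
{
        struct elem init = {}, *val;

        bpf_map_update_elem(map, key, &init, 0);
        val = bpf_map_lookup_elem(map, key);
        if (!val)
                return -1;
        if (bpf_wq_init(&val->w, map, 0))
                return -2;
        if (bpf_wq_set_callback(&val->w, demo_wq_cb, 0))
                return -3;
        if (bpf_wq_start(&val->w, 0))
                return -4;
        return 0;
}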
diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c
index 4cbdb425f223..25b51a72fe0f 100644
--- a/tools/testing/selftests/bpf/progs/wq_failures.c
+++ b/tools/testing/selftests/bpf/progs/wq_failures.c
@@ -28,14 +28,14 @@ struct {
} lru SEC(".maps");
/* callback for non sleepable workqueue */
-static int wq_callback(void *map, int *key, struct bpf_wq *work)
+static int wq_callback(void *map, int *key, void *value)
{
bpf_kfunc_common_test();
return 0;
}
/* callback for sleepable workqueue */
-static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
+static int wq_cb_sleepable(void *map, int *key, void *value)
{
bpf_kfunc_call_test_sleepable();
return 0;
diff --git a/tools/testing/selftests/bpf/progs/xdp_flowtable.c b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
new file mode 100644
index 000000000000..7fdc7b23ee74
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86dd
+#define IP_MF 0x2000 /* "More Fragments" */
+#define IP_OFFSET 0x1fff /* "Fragment Offset" */
+#define AF_INET 2
+#define AF_INET6 10
+
+struct bpf_flowtable_opts___local {
+ s32 error;
+};
+
+struct flow_offload_tuple_rhash *
+bpf_xdp_flow_lookup(struct xdp_md *, struct bpf_fib_lookup *,
+ struct bpf_flowtable_opts___local *, u32) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} stats SEC(".maps");
+
+static bool xdp_flowtable_offload_check_iphdr(struct iphdr *iph)
+{
+ /* ip fragmented traffic */
+ if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET))
+ return false;
+
+ /* ip options */
+ if (iph->ihl * 4 != sizeof(*iph))
+ return false;
+
+ if (iph->ttl <= 1)
+ return false;
+
+ return true;
+}
+
+static bool xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end,
+ u8 proto)
+{
+ if (proto == IPPROTO_TCP) {
+ struct tcphdr *tcph = ports;
+
+ if (tcph + 1 > data_end)
+ return false;
+
+ if (tcph->fin || tcph->rst)
+ return false;
+ }
+
+ return true;
+}
+
+struct flow_ports___local {
+ __be16 source, dest;
+} __attribute__((preserve_access_index));
+
+SEC("xdp.frags")
+int xdp_flowtable_do_lookup(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ struct bpf_flowtable_opts___local opts = {};
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct bpf_fib_lookup tuple = {
+ .ifindex = ctx->ingress_ifindex,
+ };
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *eth = data;
+ struct flow_ports___local *ports;
+ __u32 *val, key = 0;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ switch (eth->h_proto) {
+ case bpf_htons(ETH_P_IP): {
+ struct iphdr *iph = data + sizeof(*eth);
+
+ ports = (struct flow_ports___local *)(iph + 1);
+ if (ports + 1 > data_end)
+ return XDP_PASS;
+
+ /* sanity check on ip header */
+ if (!xdp_flowtable_offload_check_iphdr(iph))
+ return XDP_PASS;
+
+ if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
+ iph->protocol))
+ return XDP_PASS;
+
+ tuple.family = AF_INET;
+ tuple.tos = iph->tos;
+ tuple.l4_protocol = iph->protocol;
+ tuple.tot_len = bpf_ntohs(iph->tot_len);
+ tuple.ipv4_src = iph->saddr;
+ tuple.ipv4_dst = iph->daddr;
+ tuple.sport = ports->source;
+ tuple.dport = ports->dest;
+ break;
+ }
+ case bpf_htons(ETH_P_IPV6): {
+ struct in6_addr *src = (struct in6_addr *)tuple.ipv6_src;
+ struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst;
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ ports = (struct flow_ports___local *)(ip6h + 1);
+ if (ports + 1 > data_end)
+ return XDP_PASS;
+
+ if (ip6h->hop_limit <= 1)
+ return XDP_PASS;
+
+ if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
+ ip6h->nexthdr))
+ return XDP_PASS;
+
+ tuple.family = AF_INET6;
+ tuple.l4_protocol = ip6h->nexthdr;
+ tuple.tot_len = bpf_ntohs(ip6h->payload_len);
+ *src = ip6h->saddr;
+ *dst = ip6h->daddr;
+ tuple.sport = ports->source;
+ tuple.dport = ports->dest;
+ break;
+ }
+ default:
+ return XDP_PASS;
+ }
+
+ tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts));
+ if (!tuplehash)
+ return XDP_PASS;
+
+ val = bpf_map_lookup_elem(&stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
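The program above only increments a counter on successful flowtable lookups; attaching it and reading the counter is left to userspace. A hedged loader sketch with libbpf (program and map names come from the BPF source above, the object file name and interface handling are illustrative):

/* Hypothetical loader: attach the XDP program and print the hit counter. */
#include <stdio.h>
#include <unistd.h>
#include <net/if.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(int argc, char **argv)
{
        struct bpf_link *link = NULL;
        struct bpf_program *prog;
        struct bpf_object *obj;
        __u32 key = 0, hits = 0;
        int ifindex, ret = 1;

        if (argc != 2)
                return 1;
        ifindex = if_nametoindex(argv[1]);
        if (!ifindex)
                return 1;

        obj = bpf_object__open_file("xdp_flowtable.bpf.o", NULL);
        if (!obj)
                return 1;
        if (bpf_object__load(obj))
                goto out;
        prog = bpf_object__find_program_by_name(obj, "xdp_flowtable_do_lookup");
        if (prog)
                link = bpf_program__attach_xdp(prog, ifindex);
        if (!link)
                goto out;

        sleep(10);      /* let some flowtable-offloaded traffic pass */
        if (!bpf_map_lookup_elem(bpf_map__fd(bpf_object__find_map_by_name(obj, "stats")),
                                 &key, &hits))
                printf("flowtable lookup hits: %u\n", hits);
        ret = 0;
        bpf_link__destroy(link);
out:
        bpf_object__close(obj);
        return ret;
}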
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 7ea9785738b5..f8f5dc9f72b8 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#define BPF_NO_KFUNC_PROTOTYPES
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
diff --git a/tools/testing/selftests/bpf/progs/xfrm_info.c b/tools/testing/selftests/bpf/progs/xfrm_info.c
index f6a501fbba2b..a1d9f106c3f0 100644
--- a/tools/testing/selftests/bpf/progs/xfrm_info.c
+++ b/tools/testing/selftests/bpf/progs/xfrm_info.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index 524c38e9cde4..f14e10b0de96 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
+#include <regex.h>
#include <test_progs.h>
#include <bpf/btf.h>
@@ -17,9 +18,11 @@
#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
+#define TEST_TAG_EXPECT_REGEX_PFX "comment:test_expect_regex="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
+#define TEST_TAG_EXPECT_REGEX_PFX_UNPRIV "comment:test_expect_regex_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
@@ -46,10 +49,16 @@ enum mode {
UNPRIV = 2
};
+struct expect_msg {
+ const char *substr; /* substring match */
+ const char *regex_str; /* regex-based match */
+ regex_t regex;
+};
+
struct test_subspec {
char *name;
bool expect_failure;
- const char **expect_msgs;
+ struct expect_msg *expect_msgs;
size_t expect_msg_cnt;
int retval;
bool execute;
@@ -89,6 +98,16 @@ void test_loader_fini(struct test_loader *tester)
static void free_test_spec(struct test_spec *spec)
{
+ int i;
+
+ /* Deallocate expect_msgs arrays. */
+ for (i = 0; i < spec->priv.expect_msg_cnt; i++)
+ if (spec->priv.expect_msgs[i].regex_str)
+ regfree(&spec->priv.expect_msgs[i].regex);
+ for (i = 0; i < spec->unpriv.expect_msg_cnt; i++)
+ if (spec->unpriv.expect_msgs[i].regex_str)
+ regfree(&spec->unpriv.expect_msgs[i].regex);
+
free(spec->priv.name);
free(spec->unpriv.name);
free(spec->priv.expect_msgs);
@@ -100,18 +119,38 @@ static void free_test_spec(struct test_spec *spec)
spec->unpriv.expect_msgs = NULL;
}
-static int push_msg(const char *msg, struct test_subspec *subspec)
+static int push_msg(const char *substr, const char *regex_str, struct test_subspec *subspec)
{
void *tmp;
+ int regcomp_res;
+ char error_msg[100];
+ struct expect_msg *msg;
- tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *));
+ tmp = realloc(subspec->expect_msgs,
+ (1 + subspec->expect_msg_cnt) * sizeof(struct expect_msg));
if (!tmp) {
ASSERT_FAIL("failed to realloc memory for messages\n");
return -ENOMEM;
}
subspec->expect_msgs = tmp;
- subspec->expect_msgs[subspec->expect_msg_cnt++] = msg;
+ msg = &subspec->expect_msgs[subspec->expect_msg_cnt];
+
+ if (substr) {
+ msg->substr = substr;
+ msg->regex_str = NULL;
+ } else {
+ msg->regex_str = regex_str;
+ msg->substr = NULL;
+ regcomp_res = regcomp(&msg->regex, regex_str, REG_EXTENDED|REG_NEWLINE);
+ if (regcomp_res != 0) {
+ regerror(regcomp_res, &msg->regex, error_msg, sizeof(error_msg));
+ PRINT_FAIL("Regexp compilation error in '%s': '%s'\n",
+ regex_str, error_msg);
+ return -EINVAL;
+ }
+ }
+ subspec->expect_msg_cnt += 1;
return 0;
}
@@ -233,13 +272,25 @@ static int parse_test_spec(struct test_loader *tester,
spec->mode_mask |= UNPRIV;
} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
- err = push_msg(msg, &spec->priv);
+ err = push_msg(msg, NULL, &spec->priv);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
- err = push_msg(msg, &spec->unpriv);
+ err = push_msg(msg, NULL, &spec->unpriv);
+ if (err)
+ goto cleanup;
+ spec->mode_mask |= UNPRIV;
+ } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX)) {
+ msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX) - 1;
+ err = push_msg(NULL, msg, &spec->priv);
+ if (err)
+ goto cleanup;
+ spec->mode_mask |= PRIV;
+ } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV)) {
+ msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX_UNPRIV) - 1;
+ err = push_msg(NULL, msg, &spec->unpriv);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
@@ -337,16 +388,13 @@ static int parse_test_spec(struct test_loader *tester,
}
if (!spec->unpriv.expect_msgs) {
- size_t sz = spec->priv.expect_msg_cnt * sizeof(void *);
+ for (i = 0; i < spec->priv.expect_msg_cnt; i++) {
+ struct expect_msg *msg = &spec->priv.expect_msgs[i];
- spec->unpriv.expect_msgs = malloc(sz);
- if (!spec->unpriv.expect_msgs) {
- PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n");
- err = -ENOMEM;
- goto cleanup;
+ err = push_msg(msg->substr, msg->regex_str, &spec->unpriv);
+ if (err)
+ goto cleanup;
}
- memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz);
- spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt;
}
}
@@ -402,27 +450,40 @@ static void validate_case(struct test_loader *tester,
struct bpf_program *prog,
int load_err)
{
- int i, j;
+ int i, j, err;
+ char *match;
+ regmatch_t reg_match[1];
for (i = 0; i < subspec->expect_msg_cnt; i++) {
- char *match;
- const char *expect_msg;
-
- expect_msg = subspec->expect_msgs[i];
+ struct expect_msg *msg = &subspec->expect_msgs[i];
+
+ if (msg->substr) {
+ match = strstr(tester->log_buf + tester->next_match_pos, msg->substr);
+ if (match)
+ tester->next_match_pos = match - tester->log_buf + strlen(msg->substr);
+ } else {
+ err = regexec(&msg->regex,
+ tester->log_buf + tester->next_match_pos, 1, reg_match, 0);
+ if (err == 0) {
+ match = tester->log_buf + tester->next_match_pos + reg_match[0].rm_so;
+ tester->next_match_pos += reg_match[0].rm_eo;
+ } else {
+ match = NULL;
+ }
+ }
- match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
if (!ASSERT_OK_PTR(match, "expect_msg")) {
- /* if we are in verbose mode, we've already emitted log */
if (env.verbosity == VERBOSE_NONE)
emit_verifier_log(tester->log_buf, true /*force*/);
- for (j = 0; j < i; j++)
- fprintf(stderr,
- "MATCHED MSG: '%s'\n", subspec->expect_msgs[j]);
- fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
+ for (j = 0; j <= i; j++) {
+ msg = &subspec->expect_msgs[j];
+ fprintf(stderr, "%s %s: '%s'\n",
+ j < i ? "MATCHED " : "EXPECTED",
+ msg->substr ? "SUBSTR" : " REGEX",
+ msg->substr ?: msg->regex_str);
+ }
return;
}
-
- tester->next_match_pos = match - tester->log_buf + strlen(expect_msg);
}
}
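The regex branch above anchors each search at next_match_pos and then advances it past the match, mirroring what the substring branch already did. A self-contained illustration of that POSIX regex flow (log text and patterns are made up):

/* Standalone sketch of the regex expectation matching logic. */
#include <regex.h>
#include <stdio.h>

int main(void)
{
        const char *log = "0: (b7) r0 = 0\n1: (95) exit\nprocessed 2 insns";
        const char *patterns[] = {
                "[0-9]+: \\(b7\\) r0 = 0",
                "processed [0-9]+ insns",
        };
        size_t next_match_pos = 0;
        regmatch_t m;
        regex_t re;
        int i;

        for (i = 0; i < 2; i++) {
                if (regcomp(&re, patterns[i], REG_EXTENDED | REG_NEWLINE))
                        return 1;
                if (regexec(&re, log + next_match_pos, 1, &m, 0)) {
                        fprintf(stderr, "EXPECTED REGEX: '%s'\n", patterns[i]);
                        regfree(&re);
                        return 1;
                }
                next_match_pos += m.rm_eo;      /* keep searching after this hit */
                regfree(&re);
        }
        printf("all expectations matched\n");
        return 0;
}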
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 0ba5a20b19ba..51341d50213b 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -377,6 +377,15 @@ int test__join_cgroup(const char *path);
___ok; \
})
+#define ASSERT_OK_FD(fd, name) ({ \
+ static int duration = 0; \
+ int ___fd = (fd); \
+ bool ___ok = ___fd >= 0; \
+ CHECK(!___ok, (name), "unexpected fd: %d (errno %d)\n", \
+ ___fd, errno); \
+ ___ok; \
+})
+
#define SYS(goto_label, fmt, ...) \
({ \
char cmd[1024]; \
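The new ASSERT_OK_FD() checks that a call returned a valid file descriptor and logs errno when it did not. A hedged usage sketch in the prog_tests/ style (the map parameters and test name are illustrative):

/* Hypothetical subtest using ASSERT_OK_FD(). */
static void subtest_fd_helper(void)
{
        int map_fd;

        map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_map",
                                sizeof(__u32), sizeof(__u64), 1, NULL);
        if (!ASSERT_OK_FD(map_fd, "bpf_map_create"))
                return;

        /* ... exercise the map ... */
        close(map_fd);
}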
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 92752f5eeded..3e02d7267de8 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -63,7 +63,8 @@ int passed;
int failed;
int map_fd[9];
struct bpf_map *maps[9];
-int prog_fd[11];
+struct bpf_program *progs[9];
+struct bpf_link *links[9];
int txmsg_pass;
int txmsg_redir;
@@ -680,7 +681,8 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
}
}
- s->bytes_recvd += recv;
+ if (recv > 0)
+ s->bytes_recvd += recv;
if (opt->check_recved_len && s->bytes_recvd > total_bytes) {
errno = EMSGSIZE;
@@ -952,7 +954,8 @@ enum {
static int run_options(struct sockmap_options *options, int cg_fd, int test)
{
- int i, key, next_key, err, tx_prog_fd = -1, zero = 0;
+ int i, key, next_key, err, zero = 0;
+ struct bpf_program *tx_prog;
/* If base test skip BPF setup */
if (test == BASE || test == BASE_SENDPAGE)
@@ -960,48 +963,44 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test)
/* Attach programs to sockmap */
if (!txmsg_omit_skb_parser) {
- err = bpf_prog_attach(prog_fd[0], map_fd[0],
- BPF_SK_SKB_STREAM_PARSER, 0);
- if (err) {
+ links[0] = bpf_program__attach_sockmap(progs[0], map_fd[0]);
+ if (!links[0]) {
fprintf(stderr,
- "ERROR: bpf_prog_attach (sockmap %i->%i): %d (%s)\n",
- prog_fd[0], map_fd[0], err, strerror(errno));
- return err;
+ "ERROR: bpf_program__attach_sockmap (sockmap %i->%i): (%s)\n",
+ bpf_program__fd(progs[0]), map_fd[0], strerror(errno));
+ return -1;
}
}
- err = bpf_prog_attach(prog_fd[1], map_fd[0],
- BPF_SK_SKB_STREAM_VERDICT, 0);
- if (err) {
- fprintf(stderr, "ERROR: bpf_prog_attach (sockmap): %d (%s)\n",
- err, strerror(errno));
- return err;
+ links[1] = bpf_program__attach_sockmap(progs[1], map_fd[0]);
+ if (!links[1]) {
+ fprintf(stderr, "ERROR: bpf_program__attach_sockmap (sockmap): (%s)\n",
+ strerror(errno));
+ return -1;
}
/* Attach programs to TLS sockmap */
if (txmsg_ktls_skb) {
if (!txmsg_omit_skb_parser) {
- err = bpf_prog_attach(prog_fd[0], map_fd[8],
- BPF_SK_SKB_STREAM_PARSER, 0);
- if (err) {
+ links[2] = bpf_program__attach_sockmap(progs[0], map_fd[8]);
+ if (!links[2]) {
fprintf(stderr,
- "ERROR: bpf_prog_attach (TLS sockmap %i->%i): %d (%s)\n",
- prog_fd[0], map_fd[8], err, strerror(errno));
- return err;
+ "ERROR: bpf_program__attach_sockmap (TLS sockmap %i->%i): (%s)\n",
+ bpf_program__fd(progs[0]), map_fd[8], strerror(errno));
+ return -1;
}
}
- err = bpf_prog_attach(prog_fd[2], map_fd[8],
- BPF_SK_SKB_STREAM_VERDICT, 0);
- if (err) {
- fprintf(stderr, "ERROR: bpf_prog_attach (TLS sockmap): %d (%s)\n",
- err, strerror(errno));
- return err;
+ links[3] = bpf_program__attach_sockmap(progs[2], map_fd[8]);
+ if (!links[3]) {
+ fprintf(stderr, "ERROR: bpf_program__attach_sockmap (TLS sockmap): (%s)\n",
+ strerror(errno));
+ return -1;
}
}
/* Attach to cgroups */
- err = bpf_prog_attach(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS, 0);
+ err = bpf_prog_attach(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS, 0);
if (err) {
fprintf(stderr, "ERROR: bpf_prog_attach (groups): %d (%s)\n",
err, strerror(errno));
@@ -1017,30 +1016,31 @@ run:
/* Attach txmsg program to sockmap */
if (txmsg_pass)
- tx_prog_fd = prog_fd[4];
+ tx_prog = progs[4];
else if (txmsg_redir)
- tx_prog_fd = prog_fd[5];
+ tx_prog = progs[5];
else if (txmsg_apply)
- tx_prog_fd = prog_fd[6];
+ tx_prog = progs[6];
else if (txmsg_cork)
- tx_prog_fd = prog_fd[7];
+ tx_prog = progs[7];
else if (txmsg_drop)
- tx_prog_fd = prog_fd[8];
+ tx_prog = progs[8];
else
- tx_prog_fd = 0;
+ tx_prog = NULL;
- if (tx_prog_fd) {
- int redir_fd, i = 0;
+ if (tx_prog) {
+ int redir_fd;
- err = bpf_prog_attach(tx_prog_fd,
- map_fd[1], BPF_SK_MSG_VERDICT, 0);
- if (err) {
+ links[4] = bpf_program__attach_sockmap(tx_prog, map_fd[1]);
+ if (!links[4]) {
fprintf(stderr,
- "ERROR: bpf_prog_attach (txmsg): %d (%s)\n",
- err, strerror(errno));
+ "ERROR: bpf_program__attach_sockmap (txmsg): (%s)\n",
+ strerror(errno));
+ err = -1;
goto out;
}
+ i = 0;
err = bpf_map_update_elem(map_fd[1], &i, &c1, BPF_ANY);
if (err) {
fprintf(stderr,
@@ -1279,16 +1279,14 @@ run:
fprintf(stderr, "unknown test\n");
out:
/* Detach and zero all the maps */
- bpf_prog_detach2(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS);
- bpf_prog_detach2(prog_fd[0], map_fd[0], BPF_SK_SKB_STREAM_PARSER);
- bpf_prog_detach2(prog_fd[1], map_fd[0], BPF_SK_SKB_STREAM_VERDICT);
- bpf_prog_detach2(prog_fd[0], map_fd[8], BPF_SK_SKB_STREAM_PARSER);
- bpf_prog_detach2(prog_fd[2], map_fd[8], BPF_SK_SKB_STREAM_VERDICT);
+ bpf_prog_detach2(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS);
- if (tx_prog_fd >= 0)
- bpf_prog_detach2(tx_prog_fd, map_fd[1], BPF_SK_MSG_VERDICT);
+ for (i = 0; i < ARRAY_SIZE(links); i++) {
+ if (links[i])
+ bpf_link__detach(links[i]);
+ }
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < ARRAY_SIZE(map_fd); i++) {
key = next_key = 0;
bpf_map_update_elem(map_fd[i], &key, &zero, BPF_ANY);
while (bpf_map_get_next_key(map_fd[i], &key, &next_key) == 0) {
@@ -1783,34 +1781,6 @@ char *map_names[] = {
"tls_sock_map",
};
-int prog_attach_type[] = {
- BPF_SK_SKB_STREAM_PARSER,
- BPF_SK_SKB_STREAM_VERDICT,
- BPF_SK_SKB_STREAM_VERDICT,
- BPF_CGROUP_SOCK_OPS,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
- BPF_SK_MSG_VERDICT,
-};
-
-int prog_type[] = {
- BPF_PROG_TYPE_SK_SKB,
- BPF_PROG_TYPE_SK_SKB,
- BPF_PROG_TYPE_SK_SKB,
- BPF_PROG_TYPE_SOCK_OPS,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
- BPF_PROG_TYPE_SK_MSG,
-};
-
static int populate_progs(char *bpf_file)
{
struct bpf_program *prog;
@@ -1829,17 +1799,10 @@ static int populate_progs(char *bpf_file)
return -1;
}
- bpf_object__for_each_program(prog, obj) {
- bpf_program__set_type(prog, prog_type[i]);
- bpf_program__set_expected_attach_type(prog,
- prog_attach_type[i]);
- i++;
- }
-
i = bpf_object__load(obj);
i = 0;
bpf_object__for_each_program(prog, obj) {
- prog_fd[i] = bpf_program__fd(prog);
+ progs[i] = prog;
i++;
}
@@ -1853,6 +1816,9 @@ static int populate_progs(char *bpf_file)
}
}
+ for (i = 0; i < ARRAY_SIZE(links); i++)
+ links[i] = NULL;
+
return 0;
}
@@ -1970,7 +1936,6 @@ static void test_selftests_ktls(int cg_fd, struct sockmap_options *opt)
static int test_selftest(int cg_fd, struct sockmap_options *opt)
{
-
test_selftests_sockmap(cg_fd, opt);
test_selftests_sockhash(cg_fd, opt);
test_selftests_ktls(cg_fd, opt);
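test_sockmap now keeps struct bpf_program/bpf_link handles and uses the link-based sockmap attach API instead of raw prog fds and attach types. The new pattern in isolation looks roughly like this (object, program and map names are illustrative):

/* Hedged sketch of the bpf_program__attach_sockmap() flow used above. */
static int demo_attach_verdict(struct bpf_object *obj, int sock_map_fd)
{
        struct bpf_program *prog;
        struct bpf_link *link;

        prog = bpf_object__find_program_by_name(obj, "bpf_prog_verdict");
        if (!prog)
                return -1;

        /* replaces bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0) */
        link = bpf_program__attach_sockmap(prog, sock_map_fd);
        if (!link)
                return -1;

        /* ... run traffic through the sockmap ... */

        bpf_link__destroy(link);        /* detaches and frees the link */
        return 0;
}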
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
index 7b5fc98838cd..3844f9b8232a 100644
--- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
@@ -139,14 +139,14 @@ out:
return ret;
}
-static int v6only_true(int fd, const struct post_socket_opts *opts)
+static int v6only_true(int fd, void *opts)
{
int mode = true;
return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &mode, sizeof(mode));
}
-static int v6only_false(int fd, const struct post_socket_opts *opts)
+static int v6only_false(int fd, void *opts)
{
int mode = false;
@@ -156,10 +156,6 @@ static int v6only_false(int fd, const struct post_socket_opts *opts)
int main(int argc, char **argv)
{
struct network_helper_opts opts = { 0 };
- struct sockaddr_in addr4;
- struct sockaddr_in6 addr6;
- struct sockaddr_in addr4dual;
- struct sockaddr_in6 addr6dual;
int server = -1;
int server_v6 = -1;
int server_dual = -1;
@@ -181,36 +177,17 @@ int main(int argc, char **argv)
goto err;
}
- memset(&addr4, 0, sizeof(addr4));
- addr4.sin_family = AF_INET;
- addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr4.sin_port = 0;
- memcpy(&addr4dual, &addr4, sizeof(addr4dual));
-
- memset(&addr6, 0, sizeof(addr6));
- addr6.sin6_family = AF_INET6;
- addr6.sin6_addr = in6addr_loopback;
- addr6.sin6_port = 0;
-
- memset(&addr6dual, 0, sizeof(addr6dual));
- addr6dual.sin6_family = AF_INET6;
- addr6dual.sin6_addr = in6addr_any;
- addr6dual.sin6_port = 0;
-
- server = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr4,
- sizeof(addr4), NULL);
+ server = start_server_str(AF_INET, SOCK_STREAM, "127.0.0.1", 0, NULL);
if (server == -1)
goto err;
opts.post_socket_cb = v6only_true;
- server_v6 = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr6,
- sizeof(addr6), &opts);
+ server_v6 = start_server_str(AF_INET6, SOCK_STREAM, "::1", 0, &opts);
if (server_v6 == -1)
goto err;
opts.post_socket_cb = v6only_false;
- server_dual = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr6dual,
- sizeof(addr6dual), &opts);
+ server_dual = start_server_str(AF_INET6, SOCK_STREAM, "::0", 0, &opts);
if (server_dual == -1)
goto err;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index df04bda1c927..610392dfc4fb 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1237,11 +1237,6 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
}
-struct libcap {
- struct __user_cap_header_struct hdr;
- struct __user_cap_data_struct data[2];
-};
-
static int set_admin(bool admin)
{
int err;
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 70e29f316fe7..465d196c7165 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -211,7 +211,7 @@ long ksym_get_addr(const char *name)
*/
int kallsyms_find(const char *sym, unsigned long long *addr)
{
- char type, name[500];
+ char type, name[500], *match;
unsigned long long value;
int err = 0;
FILE *f;
@@ -221,6 +221,17 @@ int kallsyms_find(const char *sym, unsigned long long *addr)
return -EINVAL;
while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
+ /* If CONFIG_LTO_CLANG_THIN is enabled, static variable/function
+ * symbols could be promoted to global due to cross-file inlining.
+ * For such cases, the clang compiler adds a .llvm.<hash> suffix
+ * to those symbols to avoid potential naming conflicts.
+ * Ignore the .llvm.<hash> suffix during symbol comparison.
+ */
+ if (type == 'd') {
+ match = strstr(name, ".llvm.");
+ if (match)
+ *match = '\0';
+ }
if (strcmp(name, sym) == 0) {
*addr = value;
goto out;
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index ab25a81fd3a1..d0cdd156cd55 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -76,7 +76,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc",
+ .errstr = "arg#0 expected pointer to ctx, but got PTR",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_pass_ctx", 2 },
},
@@ -276,6 +276,19 @@
.result = ACCEPT,
},
{
+ "calls: invalid kfunc call: must provide (attach_prog_fd, btf_id) pair when freplace",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_EXT,
+ .result = REJECT,
+ .errstr = "Tracing programs must provide btf_id",
+ .fixup_kfunc_btf_id = {
+ { "bpf_dynptr_from_skb", 0 },
+ },
+},
+{
"calls: basic sanity",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 0a9293a57211..90643ccc221d 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -39,12 +39,12 @@
.result = VERBOSE_ACCEPT,
.errstr =
"mark_precise: frame0: last_idx 26 first_idx 20\
- mark_precise: frame0: regs=r2 stack= before 25\
- mark_precise: frame0: regs=r2 stack= before 24\
- mark_precise: frame0: regs=r2 stack= before 23\
- mark_precise: frame0: regs=r2 stack= before 22\
- mark_precise: frame0: regs=r2 stack= before 20\
- mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: regs=r2,r9 stack= before 25\
+ mark_precise: frame0: regs=r2,r9 stack= before 24\
+ mark_precise: frame0: regs=r2,r9 stack= before 23\
+ mark_precise: frame0: regs=r2,r9 stack= before 22\
+ mark_precise: frame0: regs=r2,r9 stack= before 20\
+ mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: last_idx 19 first_idx 10\
mark_precise: frame0: regs=r2,r9 stack= before 19\
mark_precise: frame0: regs=r9 stack= before 18\
@@ -100,11 +100,11 @@
.errstr =
"26: (85) call bpf_probe_read_kernel#113\
mark_precise: frame0: last_idx 26 first_idx 22\
- mark_precise: frame0: regs=r2 stack= before 25\
- mark_precise: frame0: regs=r2 stack= before 24\
- mark_precise: frame0: regs=r2 stack= before 23\
- mark_precise: frame0: regs=r2 stack= before 22\
- mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: regs=r2,r9 stack= before 25\
+ mark_precise: frame0: regs=r2,r9 stack= before 24\
+ mark_precise: frame0: regs=r2,r9 stack= before 23\
+ mark_precise: frame0: regs=r2,r9 stack= before 22\
+ mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: last_idx 20 first_idx 20\
mark_precise: frame0: regs=r2,r9 stack= before 20\
mark_precise: frame0: parent state regs=r2,r9 stack=:\
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 2eac0895b0a1..8144fd145237 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -196,6 +196,12 @@ static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem
};
int ret;
+ if (umem->fill_size)
+ cfg.fill_size = umem->fill_size;
+
+ if (umem->comp_size)
+ cfg.comp_size = umem->comp_size;
+
if (umem->unaligned_mode)
cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
@@ -265,6 +271,10 @@ static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_i
cfg.bind_flags |= XDP_SHARED_UMEM;
if (ifobject->mtu > MAX_ETH_PKT_SIZE)
cfg.bind_flags |= XDP_USE_SG;
+ if (umem->comp_size)
+ cfg.tx_size = umem->comp_size;
+ if (umem->fill_size)
+ cfg.rx_size = umem->fill_size;
txr = ifobject->tx_on ? &xsk->tx : NULL;
rxr = ifobject->rx_on ? &xsk->rx : NULL;
@@ -1616,7 +1626,7 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
buffers_to_fill = umem->num_frames;
else
- buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ buffers_to_fill = umem->fill_size;
ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
if (ret != buffers_to_fill)
@@ -1899,11 +1909,15 @@ static int testapp_validate_traffic(struct test_spec *test)
}
if (test->set_ring) {
- if (ifobj_tx->hw_ring_size_supp)
- return set_ring_size(ifobj_tx);
-
- ksft_test_result_skip("Changing HW ring size not supported.\n");
- return TEST_SKIP;
+ if (ifobj_tx->hw_ring_size_supp) {
+ if (set_ring_size(ifobj_tx)) {
+ ksft_test_result_skip("Failed to change HW ring size.\n");
+ return TEST_FAILURE;
+ }
+ } else {
+ ksft_test_result_skip("Changing HW ring size not supported.\n");
+ return TEST_SKIP;
+ }
}
xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx);
@@ -2441,7 +2455,7 @@ static int testapp_hw_sw_min_ring_size(struct test_spec *test)
static int testapp_hw_sw_max_ring_size(struct test_spec *test)
{
- u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2;
+ u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
int ret;
test->set_ring = true;
@@ -2449,7 +2463,8 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test)
test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
test->ifobj_rx->umem->num_frames = max_descs;
- test->ifobj_rx->xsk->rxqsize = max_descs;
+ test->ifobj_rx->umem->fill_size = max_descs;
+ test->ifobj_rx->umem->comp_size = max_descs;
test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
@@ -2457,9 +2472,12 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test)
if (ret)
return ret;
- /* Set batch_size to 4095 */
- test->ifobj_tx->xsk->batch_size = max_descs - 1;
- test->ifobj_rx->xsk->batch_size = max_descs - 1;
+ /* Set batch_size to 8152 for testing, as the ice HW ignores the 3 lowest bits when
+ * updating the Rx HW tail register.
+ */
+ test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+ test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+ pkt_stream_replace(test, max_descs, MIN_PKT_SIZE);
return testapp_validate_traffic(test);
}
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 906de5fab7a3..885c948c5d83 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -80,6 +80,8 @@ struct xsk_umem_info {
void *buffer;
u32 frame_size;
u32 base_addr;
+ u32 fill_size;
+ u32 comp_size;
bool unaligned_mode;
};
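The new fill_size/comp_size fields let a test size the fill and completion rings independently of the defaults; xsk_configure_umem() and __xsk_configure_socket() above copy them into the ring configs when set. Assuming the selftests' local xsk.h API, the umem side of that configuration would look roughly like:

/* Hedged sketch: non-default fill/completion ring sizes for a umem. */
struct xsk_umem_config cfg = {
        .fill_size = 8192,                      /* from umem->fill_size */
        .comp_size = 8192,                      /* from umem->comp_size */
        .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
        .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
        .flags = 0,
};
/* ... then passed as the final argument to xsk_umem__create() ... */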
diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
index b8703c499d28..dfec31fb9b30 100644
--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
@@ -130,7 +130,6 @@ int run_test(int cpu)
void suspend(void)
{
int power_state_fd;
- struct sigevent event = {};
int timerfd;
int err;
struct itimerspec spec = {};
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index 2732e0b29271..952e4448bf07 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
-test_memcontrol
test_core
-test_freezer
-test_kmem
-test_kill
test_cpu
test_cpuset
-test_zswap
+test_freezer
test_hugetlb_memcg
+test_kill
+test_kmem
+test_memcontrol
+test_pids
+test_zswap
wait_inotify
diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile
index 16461dc0ffdf..1b897152bab6 100644
--- a/tools/testing/selftests/cgroup/Makefile
+++ b/tools/testing/selftests/cgroup/Makefile
@@ -6,26 +6,29 @@ all: ${HELPER_PROGS}
TEST_FILES := with_stress.sh
TEST_PROGS := test_stress.sh test_cpuset_prs.sh test_cpuset_v1_hp.sh
TEST_GEN_FILES := wait_inotify
-TEST_GEN_PROGS = test_memcontrol
-TEST_GEN_PROGS += test_kmem
-TEST_GEN_PROGS += test_core
-TEST_GEN_PROGS += test_freezer
-TEST_GEN_PROGS += test_kill
+# Keep the lists lexicographically sorted
+TEST_GEN_PROGS = test_core
TEST_GEN_PROGS += test_cpu
TEST_GEN_PROGS += test_cpuset
-TEST_GEN_PROGS += test_zswap
+TEST_GEN_PROGS += test_freezer
TEST_GEN_PROGS += test_hugetlb_memcg
+TEST_GEN_PROGS += test_kill
+TEST_GEN_PROGS += test_kmem
+TEST_GEN_PROGS += test_memcontrol
+TEST_GEN_PROGS += test_pids
+TEST_GEN_PROGS += test_zswap
LOCAL_HDRS += $(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h
include ../lib.mk
-$(OUTPUT)/test_memcontrol: cgroup_util.c
-$(OUTPUT)/test_kmem: cgroup_util.c
$(OUTPUT)/test_core: cgroup_util.c
-$(OUTPUT)/test_freezer: cgroup_util.c
-$(OUTPUT)/test_kill: cgroup_util.c
$(OUTPUT)/test_cpu: cgroup_util.c
$(OUTPUT)/test_cpuset: cgroup_util.c
-$(OUTPUT)/test_zswap: cgroup_util.c
+$(OUTPUT)/test_freezer: cgroup_util.c
$(OUTPUT)/test_hugetlb_memcg: cgroup_util.c
+$(OUTPUT)/test_kill: cgroup_util.c
+$(OUTPUT)/test_kmem: cgroup_util.c
+$(OUTPUT)/test_memcontrol: cgroup_util.c
+$(OUTPUT)/test_pids: cgroup_util.c
+$(OUTPUT)/test_zswap: cgroup_util.c
diff --git a/tools/testing/selftests/cgroup/config b/tools/testing/selftests/cgroup/config
index 97d549ee894f..39f979690dd3 100644
--- a/tools/testing/selftests/cgroup/config
+++ b/tools/testing/selftests/cgroup/config
@@ -3,5 +3,4 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_KMEM=y
CONFIG_PAGE_COUNTER=y
diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
index b5eb1be2248c..7c08cc153367 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
@@ -28,6 +28,14 @@ CPULIST=$(cat $CGROUP2/cpuset.cpus.effective)
NR_CPUS=$(lscpu | grep "^CPU(s):" | sed -e "s/.*:[[:space:]]*//")
[[ $NR_CPUS -lt 8 ]] && skip_test "Test needs at least 8 cpus available!"
+# Check to see if /dev/console exists and is writable
+if [[ -c /dev/console && -w /dev/console ]]
+then
+ CONSOLE=/dev/console
+else
+ CONSOLE=/dev/null
+fi
+
# Set verbose flag and delay factor
PROG=$1
VERBOSE=0
@@ -103,8 +111,8 @@ console_msg()
{
MSG=$1
echo "$MSG"
- echo "" > /dev/console
- echo "$MSG" > /dev/console
+ echo "" > $CONSOLE
+ echo "$MSG" > $CONSOLE
pause 0.01
}
@@ -161,6 +169,14 @@ test_add_proc()
# T = put a task into cgroup
# O<c>=<v> = Write <v> to CPU online file of <c>
#
+# ECPUs - effective CPUs of cpusets
+# Pstate - partition root state
+# ISOLCPUS - isolated CPUs (<icpus>[,<icpus2>])
+#
+# Note that if there are 2 fields in ISOLCPUS, the first one is for
+# sched-debug matching which includes offline CPUs and single-CPU partitions
+# while the second one is for matching cpuset.cpus.isolated.
+#
SETUP_A123_PARTITIONS="C1-3:P1:S+ C2-3:P1:S+ C3:P1"
TEST_MATRIX=(
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
@@ -220,23 +236,29 @@ TEST_MATRIX=(
" C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3:P2 . . 0 A1:0-1,A2:2-3,A3:2-3 A1:P0,A2:P2 2-3"
" C0-3:S+ C1-3:S+ C2-3 . X2-3 X3:P2 . . 0 A1:0-2,A2:3,A3:3 A1:P0,A2:P2 3"
" C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2 . 0 A1:0-1,A2:1,A3:2-3 A1:P0,A3:P2 2-3"
- " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:C3 . 0 A1:0-2,A2:1-2,A3:3 A1:P0,A3:P2 3"
+ " C0-3:S+ C1-3:S+ C2-3 . X2-3 X2-3 X2-3:P2:C3 . 0 A1:0-1,A2:1,A3:2-3 A1:P0,A3:P2 2-3"
" C0-3:S+ C1-3:S+ C2-3 C2-3 . . . P2 0 A1:0-3,A2:1-3,A3:2-3,B1:2-3 A1:P0,A3:P0,B1:P-2"
" C0-3:S+ C1-3:S+ C2-3 C4-5 . . . P2 0 B1:4-5 B1:P2 4-5"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2 P2 0 A3:2-3,B1:4 A3:P2,B1:P2 2-4"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2:C1-3 P2 0 A3:2-3,B1:4 A3:P2,B1:P2 2-4"
" C0-3:S+ C1-3:S+ C2-3 C4 X1-3 X1-3:P2 P2 . 0 A2:1,A3:2-3 A2:P2,A3:P2 1-3"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3 X2-3:P2 P2:C4-5 0 A3:2-3,B1:4-5 A3:P2,B1:P2 2-5"
+ " C4:X0-3:S+ X1-3:S+ X2-3 . . P2 . . 0 A1:4,A2:1-3,A3:1-3 A2:P2 1-3"
+ " C4:X0-3:S+ X1-3:S+ X2-3 . . . P2 . 0 A1:4,A2:4,A3:2-3 A3:P2 2-3"
# Nested remote/local partition tests
" C0-3:S+ C1-3:S+ C2-3 C4-5 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:,A3:2-3,B1:4-5 \
A1:P0,A2:P1,A3:P2,B1:P1 2-3"
" C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:,A3:2-3,B1:4 \
A1:P0,A2:P1,A3:P2,B1:P1 2-4,2-3"
+ " C0-3:S+ C1-3:S+ C2-3 C4 X2-3 X2-3:P1 . P1 0 A1:0-1,A2:2-3,A3:2-3,B1:4 \
+ A1:P0,A2:P1,A3:P0,B1:P1"
" C0-3:S+ C1-3:S+ C3 C4 X2-3 X2-3:P1 P2 P1 0 A1:0-1,A2:2,A3:3,B1:4 \
A1:P0,A2:P1,A3:P2,B1:P1 2-4,3"
" C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X4:P1 . 0 A1:0-1,A2:2-3,A3:4 \
A1:P0,A2:P2,A3:P1 2-4,2-3"
+ " C0-4:S+ C1-4:S+ C2-4 . X2-4 X2-4:P2 X3-4:P1 . 0 A1:0-1,A2:2,A3:3-4 \
+ A1:P0,A2:P2,A3:P1 2"
" C0-4:X2-4:S+ C1-4:X2-4:S+:P2 C2-4:X4:P1 \
. . X5 . . 0 A1:0-4,A2:1-4,A3:2-4 \
A1:P0,A2:P-2,A3:P-1"
@@ -262,8 +284,8 @@ TEST_MATRIX=(
. . X2-3 P2 . . 0 A1:0-2,A2:3,XA2:3 A2:P2 3"
# Invalid to valid local partition direct transition tests
- " C1-3:S+:P2 C2-3:X1:P2 . . . . . . 0 A1:1-3,XA1:1-3,A2:2-3:XA2: A1:P2,A2:P-2 1-3"
- " C1-3:S+:P2 C2-3:X1:P2 . . . X3:P2 . . 0 A1:1-2,XA1:1-3,A2:3:XA2:3 A1:P2,A2:P2 1-3"
+ " C1-3:S+:P2 X4:P2 . . . . . . 0 A1:1-3,XA1:1-3,A2:1-3:XA2: A1:P2,A2:P-2 1-3"
+ " C1-3:S+:P2 X4:P2 . . . X3:P2 . . 0 A1:1-2,XA1:1-3,A2:3:XA2:3 A1:P2,A2:P2 1-3"
" C0-3:P2 . . C4-6 C0-4 . . . 0 A1:0-4,B1:4-6 A1:P-2,B1:P0"
" C0-3:P2 . . C4-6 C0-4:C0-3 . . . 0 A1:0-3,B1:4-6 A1:P2,B1:P0 0-3"
" C0-3:P2 . . C3-5:C4-5 . . . . 0 A1:0-3,B1:4-5 A1:P2,B1:P0 0-3"
@@ -274,32 +296,26 @@ TEST_MATRIX=(
" C0-3:X1-3:S+:P2 C1-3:X2-3:S+:P2 C2-3:X3:P2 \
. . X4 . . 0 A1:1-3,A2:1-3,A3:2-3,XA2:,XA3: A1:P2,A2:P-2,A3:P-2 1-3"
" C0-3:X1-3:S+:P2 C1-3:X2-3:S+:P2 C2-3:X3:P2 \
- . . C4 . . 0 A1:1-3,A2:1-3,A3:2-3,XA2:,XA3: A1:P2,A2:P-2,A3:P-2 1-3"
+ . . C4:X . . 0 A1:1-3,A2:1-3,A3:2-3,XA2:,XA3: A1:P2,A2:P-2,A3:P-2 1-3"
# Local partition CPU change tests
" C0-5:S+:P2 C4-5:S+:P1 . . . C3-5 . . 0 A1:0-2,A2:3-5 A1:P2,A2:P1 0-2"
" C0-5:S+:P2 C4-5:S+:P1 . . C1-5 . . . 0 A1:1-3,A2:4-5 A1:P2,A2:P1 1-3"
# cpus_allowed/exclusive_cpus update tests
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
- . C4 . P2 . 0 A1:4,A2:4,XA2:,XA3:,A3:4 \
+ . X:C4 . P2 . 0 A1:4,A2:4,XA2:,XA3:,A3:4 \
A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
. X1 . P2 . 0 A1:0-3,A2:1-3,XA1:1,XA2:,XA3:,A3:2-3 \
A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
- . . C3 P2 . 0 A1:0-2,A2:0-2,XA2:3,XA3:3,A3:3 \
- A1:P0,A3:P2 3"
- " C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3 \
. . X3 P2 . 0 A1:0-2,A2:1-2,XA2:3,XA3:3,A3:3 \
A1:P0,A3:P2 3"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
. . X3 . . 0 A1:0-3,A2:1-3,XA2:3,XA3:3,A3:2-3 \
A1:P0,A3:P-2"
" C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
- . . C3 . . 0 A1:0-3,A2:3,XA2:3,XA3:3,A3:3 \
- A1:P0,A3:P-2"
- " C0-3:X2-3:S+ C1-3:X2-3:S+ C2-3:X2-3:P2 \
- . C4 . . . 0 A1:4,A2:4,A3:4,XA1:,XA2:,XA3 \
+ . X4 . . . 0 A1:0-3,A2:1-3,A3:2-3,XA1:4,XA2:,XA3 \
A1:P0,A3:P-2"
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
@@ -346,6 +362,9 @@ TEST_MATRIX=(
" C0-1:P1 . . P1:C2-3 C0-2 . . . 0 A1:0-2,B1:2-3 A1:P-1,B1:P-1"
" C0-1 . . P1:C2-3 C0-2 . . . 0 A1:0-2,B1:2-3 A1:P0,B1:P-1"
+ # cpuset.cpus can overlap with sibling cpuset.cpus.exclusive but not be subsumed by it
+ " C0-3 . . C4-5 X5 . . . 0 A1:0-3,B1:4-5"
+
# old-A1 old-A2 old-A3 old-B1 new-A1 new-A2 new-A3 new-B1 fail ECPUs Pstate ISOLCPUS
# ------ ------ ------ ------ ------ ------ ------ ------ ---- ----- ------ --------
# Failure cases:
@@ -355,6 +374,9 @@ TEST_MATRIX=(
# Changes to cpuset.cpus.exclusive that violate exclusivity rule is rejected
" C0-3 . . C4-5 X0-3 . . X3-5 1 A1:0-3,B1:4-5"
+
+ # cpuset.cpus cannot be a subset of sibling cpuset.cpus.exclusive
+ " C0-3 . . C4-5 X3-5 . . . 1 A1:0-3,B1:4-5"
)
#
@@ -556,14 +578,15 @@ check_cgroup_states()
do
set -- $(echo $CHK | sed -e "s/:/ /g")
CGRP=$1
+ CGRP_DIR=$CGRP
STATE=$2
FILE=
EVAL=$(expr substr $STATE 2 2)
- [[ $CGRP = A2 ]] && CGRP=A1/A2
- [[ $CGRP = A3 ]] && CGRP=A1/A2/A3
+ [[ $CGRP = A2 ]] && CGRP_DIR=A1/A2
+ [[ $CGRP = A3 ]] && CGRP_DIR=A1/A2/A3
case $STATE in
- P*) FILE=$CGRP/cpuset.cpus.partition
+ P*) FILE=$CGRP_DIR/cpuset.cpus.partition
;;
*) echo "Unknown state: $STATE!"
exit 1
@@ -587,6 +610,16 @@ check_cgroup_states()
;;
esac
[[ $EVAL != $VAL ]] && return 1
+
+ #
+ # For a root partition, dump sched-domains info to the console if
+ # verbose mode is set, for manual comparison with sched debug info.
+ #
+ [[ $VAL -eq 1 && $VERBOSE -gt 0 ]] && {
+ DOMS=$(cat $CGRP_DIR/cpuset.cpus.effective)
+ [[ -n "$DOMS" ]] &&
+ echo " [$CGRP] sched-domain: $DOMS" > $CONSOLE
+ }
done
return 0
}
@@ -694,9 +727,9 @@ null_isolcpus_check()
[[ $VERBOSE -gt 0 ]] || return 0
# Retry a few times before printing error
RETRY=0
- while [[ $RETRY -lt 5 ]]
+ while [[ $RETRY -lt 8 ]]
do
- pause 0.01
+ pause 0.02
check_isolcpus "."
[[ $? -eq 0 ]] && return 0
((RETRY++))
@@ -726,7 +759,7 @@ run_state_test()
while [[ $I -lt $CNT ]]
do
- echo "Running test $I ..." > /dev/console
+ echo "Running test $I ..." > $CONSOLE
[[ $VERBOSE -gt 1 ]] && {
echo ""
eval echo \${$TEST[$I]}
@@ -783,7 +816,7 @@ run_state_test()
while [[ $NEWLIST != $CPULIST && $RETRY -lt 8 ]]
do
# Wait a bit longer & recheck a few times
- pause 0.01
+ pause 0.02
((RETRY++))
NEWLIST=$(cat cpuset.cpus.effective)
done
diff --git a/tools/testing/selftests/cgroup/test_pids.c b/tools/testing/selftests/cgroup/test_pids.c
new file mode 100644
index 000000000000..9ecb83c6cc5c
--- /dev/null
+++ b/tools/testing/selftests/cgroup/test_pids.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <linux/limits.h>
+#include <signal.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+#include "cgroup_util.h"
+
+static int run_success(const char *cgroup, void *arg)
+{
+ return 0;
+}
+
+static int run_pause(const char *cgroup, void *arg)
+{
+ return pause();
+}
+
+/*
+ * This test checks that pids.max prevents forking new children above the
+ * specified limit in the cgroup.
+ */
+static int test_pids_max(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cg_pids;
+ int pid;
+
+ cg_pids = cg_name(root, "pids_test");
+ if (!cg_pids)
+ goto cleanup;
+
+ if (cg_create(cg_pids))
+ goto cleanup;
+
+ if (cg_read_strcmp(cg_pids, "pids.max", "max\n"))
+ goto cleanup;
+
+ if (cg_write(cg_pids, "pids.max", "2"))
+ goto cleanup;
+
+ if (cg_enter_current(cg_pids))
+ goto cleanup;
+
+ pid = cg_run_nowait(cg_pids, run_pause, NULL);
+ if (pid < 0)
+ goto cleanup;
+
+ if (cg_run_nowait(cg_pids, run_success, NULL) != -1 || errno != EAGAIN)
+ goto cleanup;
+
+ if (kill(pid, SIGINT))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_enter_current(root);
+ cg_destroy(cg_pids);
+ free(cg_pids);
+
+ return ret;
+}
+
+/*
+ * This test checks that pids.events are counted in the cgroup associated with pids.max
+ */
+static int test_pids_events(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cg_parent = NULL, *cg_child = NULL;
+ int pid;
+
+ cg_parent = cg_name(root, "pids_parent");
+ cg_child = cg_name(cg_parent, "pids_child");
+ if (!cg_parent || !cg_child)
+ goto cleanup;
+
+ if (cg_create(cg_parent))
+ goto cleanup;
+ if (cg_write(cg_parent, "cgroup.subtree_control", "+pids"))
+ goto cleanup;
+ if (cg_create(cg_child))
+ goto cleanup;
+
+ if (cg_write(cg_parent, "pids.max", "2"))
+ goto cleanup;
+
+ if (cg_read_strcmp(cg_child, "pids.max", "max\n"))
+ goto cleanup;
+
+ if (cg_enter_current(cg_child))
+ goto cleanup;
+
+ pid = cg_run_nowait(cg_child, run_pause, NULL);
+ if (pid < 0)
+ goto cleanup;
+
+ if (cg_run_nowait(cg_child, run_success, NULL) != -1 || errno != EAGAIN)
+ goto cleanup;
+
+ if (kill(pid, SIGINT))
+ goto cleanup;
+
+ if (cg_read_key_long(cg_child, "pids.events", "max ") != 0)
+ goto cleanup;
+ if (cg_read_key_long(cg_parent, "pids.events", "max ") != 1)
+ goto cleanup;
+
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_enter_current(root);
+ if (cg_child)
+ cg_destroy(cg_child);
+ if (cg_parent)
+ cg_destroy(cg_parent);
+ free(cg_child);
+ free(cg_parent);
+
+ return ret;
+}
+
+
+
+#define T(x) { x, #x }
+struct pids_test {
+ int (*fn)(const char *root);
+ const char *name;
+} tests[] = {
+ T(test_pids_max),
+ T(test_pids_events),
+};
+#undef T
+
+int main(int argc, char **argv)
+{
+ char root[PATH_MAX];
+
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
+ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ /*
+ * Check that pids controller is available:
+ * pids is listed in cgroup.controllers
+ */
+ if (cg_read_strstr(root, "cgroup.controllers", "pids"))
+ ksft_exit_skip("pids controller isn't available\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "pids"))
+ if (cg_write(root, "cgroup.subtree_control", "+pids"))
+ ksft_exit_skip("Failed to set pids controller\n");
+
+ for (int i = 0; i < ARRAY_SIZE(tests); i++) {
+ switch (tests[i].fn(root)) {
+ case KSFT_PASS:
+ ksft_test_result_pass("%s\n", tests[i].name);
+ break;
+ case KSFT_SKIP:
+ ksft_test_result_skip("%s\n", tests[i].name);
+ break;
+ default:
+ ksft_test_result_fail("%s\n", tests[i].name);
+ break;
+ }
+ }
+
+ ksft_finished();
+}
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index 29a22f50e762..1e2e98cc809d 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -4,7 +4,7 @@
TEST_GEN_FILES += huge_count_read_write
TEST_GEN_FILES += debugfs_target_ids_read_before_terminate_race
TEST_GEN_FILES += debugfs_target_ids_pid_leak
-TEST_GEN_FILES += access_memory
+TEST_GEN_FILES += access_memory access_memory_even
TEST_FILES = _chk_dependency.sh _debugfs_common.sh
@@ -13,6 +13,7 @@ TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
TEST_PROGS += sysfs.sh
TEST_PROGS += sysfs_update_schemes_tried_regions_wss_estimation.py
TEST_PROGS += damos_quota.py damos_quota_goal.py damos_apply_interval.py
+TEST_PROGS += damos_tried_regions.py damon_nr_regions.py
TEST_PROGS += reclaim.sh lru_sort.sh
# regression tests (reproducers of previously found bugs)
diff --git a/tools/testing/selftests/damon/_damon_sysfs.py b/tools/testing/selftests/damon/_damon_sysfs.py
index 2bd44c32be1b..6e136dc3df19 100644
--- a/tools/testing/selftests/damon/_damon_sysfs.py
+++ b/tools/testing/selftests/damon/_damon_sysfs.py
@@ -175,16 +175,24 @@ class DamosStats:
self.sz_applied = sz_applied
self.qt_exceeds = qt_exceeds
+class DamosTriedRegion:
+ def __init__(self, start, end, nr_accesses, age):
+ self.start = start
+ self.end = end
+ self.nr_accesses = nr_accesses
+ self.age = age
+
class Damos:
action = None
access_pattern = None
quota = None
apply_interval_us = None
- # todo: Support watermarks, stats, tried_regions
+ # todo: Support watermarks, stats
idx = None
context = None
tried_bytes = None
stats = None
+ tried_regions = None
def __init__(self, action='stat', access_pattern=DamosAccessPattern(),
quota=DamosQuota(), apply_interval_us=0):
@@ -398,6 +406,35 @@ class Kdamond:
err = write_file(os.path.join(self.sysfs_dir(), 'state'), 'on')
return err
+ def stop(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'state'), 'off')
+ return err
+
+ def update_schemes_tried_regions(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'state'),
+ 'update_schemes_tried_regions')
+ if err is not None:
+ return err
+ for context in self.contexts:
+ for scheme in context.schemes:
+ tried_regions = []
+ tried_regions_dir = os.path.join(
+ scheme.sysfs_dir(), 'tried_regions')
+ for filename in os.listdir(
+ os.path.join(scheme.sysfs_dir(), 'tried_regions')):
+ tried_region_dir = os.path.join(tried_regions_dir, filename)
+ if not os.path.isdir(tried_region_dir):
+ continue
+ region_values = []
+ for f in ['start', 'end', 'nr_accesses', 'age']:
+ content, err = read_file(
+ os.path.join(tried_region_dir, f))
+ if err is not None:
+ return err
+ region_values.append(int(content))
+ tried_regions.append(DamosTriedRegion(*region_values))
+ scheme.tried_regions = tried_regions
+
def update_schemes_tried_bytes(self):
err = write_file(os.path.join(self.sysfs_dir(), 'state'),
'update_schemes_tried_bytes')
@@ -444,6 +481,25 @@ class Kdamond:
goal.effective_bytes = int(content)
return None
+ def commit(self):
+ nr_contexts_file = os.path.join(self.sysfs_dir(),
+ 'contexts', 'nr_contexts')
+ content, err = read_file(nr_contexts_file)
+ if err is not None:
+ return err
+ if int(content) != len(self.contexts):
+ err = write_file(nr_contexts_file, '%d' % len(self.contexts))
+ if err is not None:
+ return err
+
+ for context in self.contexts:
+ err = context.stage()
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'state'), 'commit')
+ return err
+
+
def commit_schemes_quota_goals(self):
for context in self.contexts:
for scheme in context.schemes:
@@ -478,3 +534,10 @@ class Kdamonds:
if err is not None:
return err
return None
+
+ def stop(self):
+ for kdamond in self.kdamonds:
+ err = kdamond.stop()
+ if err is not None:
+ return err
+ return None
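
For orientation, here is a minimal sketch of how the helpers added to _damon_sysfs.py above are meant to be driven together (class and method names are taken from this patch and the test scripts below; the pid value is a placeholder, not a real workload):

import _damon_sysfs

kdamonds = _damon_sysfs.Kdamonds([_damon_sysfs.Kdamond(
    contexts=[_damon_sysfs.DamonCtx(
        ops='vaddr',
        targets=[_damon_sysfs.DamonTarget(pid=1234)],  # placeholder pid
        schemes=[_damon_sysfs.Damos(action='stat')])])])

err = kdamonds.start()
if err is None:
    err = kdamonds.kdamonds[0].update_schemes_tried_regions()
if err is None:
    scheme = kdamonds.kdamonds[0].contexts[0].schemes[0]
    for region in scheme.tried_regions or []:
        print(region.start, region.end, region.nr_accesses, region.age)
kdamonds.stop()

The new commit() method follows the same pattern: mutate the in-memory Kdamond objects (for example the monitoring attributes), then call commit() to stage the contexts again and write 'commit' to the kdamond's state file.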
diff --git a/tools/testing/selftests/damon/access_memory.c b/tools/testing/selftests/damon/access_memory.c
index 585a2fa54329..56b17e8fe1be 100644
--- a/tools/testing/selftests/damon/access_memory.c
+++ b/tools/testing/selftests/damon/access_memory.c
@@ -35,7 +35,7 @@ int main(int argc, char *argv[])
start_clock = clock();
while ((clock() - start_clock) * 1000 / CLOCKS_PER_SEC <
access_time_ms)
- memset(regions[i], i, 1024 * 1024 * 10);
+ memset(regions[i], i, sz_region);
}
return 0;
}
diff --git a/tools/testing/selftests/damon/access_memory_even.c b/tools/testing/selftests/damon/access_memory_even.c
new file mode 100644
index 000000000000..3be121487432
--- /dev/null
+++ b/tools/testing/selftests/damon/access_memory_even.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Artificial memory access program for testing DAMON.
+ *
+ * Receives the number of regions and the size of each region from the user.
+ * Allocates the regions and repeatedly accesses even-numbered (from zero) regions.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+int main(int argc, char *argv[])
+{
+ char **regions;
+ clock_t start_clock;
+ int nr_regions;
+ int sz_region;
+ int access_time_ms;
+ int i;
+
+ if (argc != 3) {
+ printf("Usage: %s <number> <size (bytes)>\n", argv[0]);
+ return -1;
+ }
+
+ nr_regions = atoi(argv[1]);
+ sz_region = atoi(argv[2]);
+
+ regions = malloc(sizeof(*regions) * nr_regions);
+ for (i = 0; i < nr_regions; i++)
+ regions[i] = malloc(sz_region);
+
+ while (1) {
+ for (i = 0; i < nr_regions; i++) {
+ if (i % 2 == 0)
+ memset(regions[i], i, sz_region);
+ }
+ }
+ return 0;
+}
diff --git a/tools/testing/selftests/damon/damon_nr_regions.py b/tools/testing/selftests/damon/damon_nr_regions.py
new file mode 100644
index 000000000000..2e8a74aff543
--- /dev/null
+++ b/tools/testing/selftests/damon/damon_nr_regions.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import subprocess
+import time
+
+import _damon_sysfs
+
+def test_nr_regions(real_nr_regions, min_nr_regions, max_nr_regions):
+ '''
+ Create a process having the given 'real_nr_regions' regions, and monitor it
+ using DAMON with the given '{min,max}_nr_regions' monitoring parameters.
+
+ Exit with a non-zero return code if the given {min,max}_nr_regions bounds are
+ not kept.
+ '''
+ sz_region = 10 * 1024 * 1024
+ proc = subprocess.Popen(['./access_memory_even', '%d' % real_nr_regions,
+ '%d' % sz_region])
+
+ # stat every monitored region
+ kdamonds = _damon_sysfs.Kdamonds([_damon_sysfs.Kdamond(
+ contexts=[_damon_sysfs.DamonCtx(
+ monitoring_attrs=_damon_sysfs.DamonAttrs(
+ min_nr_regions=min_nr_regions,
+ max_nr_regions=max_nr_regions),
+ ops='vaddr',
+ targets=[_damon_sysfs.DamonTarget(pid=proc.pid)],
+ schemes=[_damon_sysfs.Damos(action='stat',
+ )] # schemes
+ )] # contexts
+ )]) # kdamonds
+
+ err = kdamonds.start()
+ if err is not None:
+ proc.terminate()
+ print('kdamond start failed: %s' % err)
+ exit(1)
+
+ collected_nr_regions = []
+ while proc.poll() is None:
+ time.sleep(0.1)
+ err = kdamonds.kdamonds[0].update_schemes_tried_regions()
+ if err is not None:
+ proc.terminate()
+ print('tried regions update failed: %s' % err)
+ exit(1)
+
+ scheme = kdamonds.kdamonds[0].contexts[0].schemes[0]
+ if scheme.tried_regions is None:
+ proc.terminate()
+ print('tried regions is not collected')
+ exit(1)
+
+ nr_tried_regions = len(scheme.tried_regions)
+ if nr_tried_regions <= 0:
+ proc.terminate()
+ print('tried regions is not created')
+ exit(1)
+ collected_nr_regions.append(nr_tried_regions)
+ if len(collected_nr_regions) > 10:
+ break
+ proc.terminate()
+ kdamonds.stop()
+
+ test_name = 'nr_regions test with %d/%d/%d real/min/max nr_regions' % (
+ real_nr_regions, min_nr_regions, max_nr_regions)
+ if (collected_nr_regions[0] < min_nr_regions or
+ collected_nr_regions[-1] > max_nr_regions):
+ print('fail %s' % test_name)
+ print('collected numbers of regions:')
+ for nr in collected_nr_regions:
+ print(nr)
+ exit(1)
+ print('pass %s ' % test_name)
+
+def main():
+ # test min_nr_regions larger than real nr regions
+ test_nr_regions(10, 20, 100)
+
+ # test max_nr_regions smaller than real nr regions
+ test_nr_regions(15, 3, 10)
+
+ # test online-tuned max_nr_regions that is smaller than the real nr regions
+ sz_region = 10 * 1024 * 1024
+ proc = subprocess.Popen(['./access_memory_even', '14', '%d' % sz_region])
+
+ # stat every monitored region
+ kdamonds = _damon_sysfs.Kdamonds([_damon_sysfs.Kdamond(
+ contexts=[_damon_sysfs.DamonCtx(
+ monitoring_attrs=_damon_sysfs.DamonAttrs(
+ min_nr_regions=10, max_nr_regions=1000),
+ ops='vaddr',
+ targets=[_damon_sysfs.DamonTarget(pid=proc.pid)],
+ schemes=[_damon_sysfs.Damos(action='stat',
+ )] # schemes
+ )] # contexts
+ )]) # kdamonds
+
+ err = kdamonds.start()
+ if err is not None:
+ proc.terminate()
+ print('kdamond start failed: %s' % err)
+ exit(1)
+
+ # wait until the real regions are found
+ time.sleep(3)
+
+ attrs = kdamonds.kdamonds[0].contexts[0].monitoring_attrs
+ attrs.min_nr_regions = 3
+ attrs.max_nr_regions = 7
+ err = kdamonds.kdamonds[0].commit()
+ if err is not None:
+ proc.terminate()
+ print('commit failed: %s' % err)
+ exit(1)
+ # wait until the next merge operation is executed
+ time.sleep(0.3)
+
+ err = kdamonds.kdamonds[0].update_schemes_tried_regions()
+ if err is not None:
+ proc.terminate()
+ print('tried regions update failed: %s' % err)
+ exit(1)
+
+ scheme = kdamonds.kdamonds[0].contexts[0].schemes[0]
+ if scheme.tried_regions is None:
+ proc.terminate()
+ print('tried regions is not collected')
+ exit(1)
+
+ nr_tried_regions = len(scheme.tried_regions)
+ if nr_tried_regions <= 0:
+ proc.terminate()
+ print('tried regions is not created')
+ exit(1)
+ proc.terminate()
+
+ if nr_tried_regions > 7:
+ print('fail online-tuned max_nr_regions: %d > 7' % nr_tried_regions)
+ exit(1)
+ print('pass online-tuned max_nr_regions')
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/testing/selftests/damon/damos_tried_regions.py b/tools/testing/selftests/damon/damos_tried_regions.py
new file mode 100644
index 000000000000..3b347eb28bd2
--- /dev/null
+++ b/tools/testing/selftests/damon/damos_tried_regions.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import subprocess
+import time
+
+import _damon_sysfs
+
+def main():
+ # repeatedly access even-numbered ones in 14 regions of 10 MiB size
+ sz_region = 10 * 1024 * 1024
+ proc = subprocess.Popen(['./access_memory_even', '14', '%d' % sz_region])
+
+ # stat every monitored region
+ kdamonds = _damon_sysfs.Kdamonds([_damon_sysfs.Kdamond(
+ contexts=[_damon_sysfs.DamonCtx(
+ ops='vaddr',
+ targets=[_damon_sysfs.DamonTarget(pid=proc.pid)],
+ schemes=[_damon_sysfs.Damos(action='stat',
+ )] # schemes
+ )] # contexts
+ )]) # kdamonds
+
+ err = kdamonds.start()
+ if err is not None:
+ proc.terminate()
+ print('kdamond start failed: %s' % err)
+ exit(1)
+
+ collected_nr_regions = []
+ while proc.poll() is None:
+ time.sleep(0.1)
+ err = kdamonds.kdamonds[0].update_schemes_tried_regions()
+ if err is not None:
+ proc.terminate()
+ print('tried regions update failed: %s' % err)
+ exit(1)
+
+ scheme = kdamonds.kdamonds[0].contexts[0].schemes[0]
+ if scheme.tried_regions is None:
+ proc.terminate()
+ print('tried regions is not collected')
+ exit(1)
+
+ nr_tried_regions = len(scheme.tried_regions)
+ if nr_tried_regions <= 0:
+ proc.terminate()
+ print('tried regions is not created')
+ exit(1)
+ collected_nr_regions.append(nr_tried_regions)
+ if len(collected_nr_regions) > 10:
+ break
+ proc.terminate()
+
+ collected_nr_regions.sort()
+ sample = collected_nr_regions[4]
+ print('50-th percentile nr_regions: %d' % sample)
+ print('expectation (>= 14) is %s' % ('met' if sample >= 14 else 'not met'))
+ if collected_nr_regions[4] < 14:
+ print('full nr_regions:')
+ print('\n'.join(str(nr) for nr in collected_nr_regions))
+ exit(1)
+
+if __name__ == '__main__':
+ main()
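
One note on the percentile computed above: the loop collects 11 samples before breaking, so after sorting, the median is index 5 and index 4 sits just below it. If a strict median were wanted instead, the standard library offers one; a small alternative sketch (not what the patch does):

import statistics

# collected_nr_regions is the list built by the loop above
sample = statistics.median_low(collected_nr_regions)
if sample < 14:
    print('median nr_regions %d is below the expected 14' % sample)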
diff --git a/tools/testing/selftests/devices/Makefile b/tools/testing/selftests/devices/Makefile
deleted file mode 100644
index ca29249b30c3..000000000000
--- a/tools/testing/selftests/devices/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-TEST_PROGS := test_discoverable_devices.py
-TEST_FILES := boards ksft.py
-
-include ../lib.mk
diff --git a/tools/testing/selftests/devices/error_logs/Makefile b/tools/testing/selftests/devices/error_logs/Makefile
new file mode 100644
index 000000000000..d546c3fb0a7f
--- /dev/null
+++ b/tools/testing/selftests/devices/error_logs/Makefile
@@ -0,0 +1,3 @@
+TEST_PROGS := test_device_error_logs.py
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/devices/error_logs/test_device_error_logs.py b/tools/testing/selftests/devices/error_logs/test_device_error_logs.py
new file mode 100755
index 000000000000..3dd56c8ec92c
--- /dev/null
+++ b/tools/testing/selftests/devices/error_logs/test_device_error_logs.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2024 Collabora Ltd
+#
+# This test checks for the presence of error (or more critical) log messages
+# coming from devices in the kernel log.
+#
+# One failed test case is reported for each device that has emitted error
+# logs. Devices with no errors do not produce a passing test case, to avoid
+# polluting the results; therefore a successful run will list 0 tests run.
+#
+
+import glob
+import os
+import re
+import sys
+
+# Allow ksft module to be imported from different directory
+this_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(this_dir, "../../kselftest/"))
+
+import ksft
+
+kmsg = "/dev/kmsg"
+
+RE_log = re.compile(
+ r"(?P<prefix>[0-9]+),(?P<sequence>[0-9]+),(?P<timestamp>[0-9]+),(?P<flag>[^;]*)(,[^;]*)*;(?P<message>.*)"
+)
+RE_tag = re.compile(r" (?P<key>[^=]+)=(?P<value>.*)")
+
+PREFIX_ERROR = 3
+
+logs = []
+error_log_per_device = {}
+
+
+def parse_kmsg():
+ current_log = {}
+
+ with open(kmsg) as f:
+ os.set_blocking(f.fileno(), False)
+
+ for line in f:
+ tag_line = RE_tag.match(line)
+ log_line = RE_log.match(line)
+
+ if log_line:
+ if current_log:
+ logs.append(current_log) # Save last log
+
+ current_log = {
+ "prefix": int(log_line.group("prefix")),
+ "sequence": int(log_line.group("sequence")),
+ "timestamp": int(log_line.group("timestamp")),
+ "flag": log_line.group("flag"),
+ "message": log_line.group("message"),
+ }
+ elif tag_line:
+ current_log[tag_line.group("key")] = tag_line.group("value")
+
+
+def generate_per_device_error_log():
+ for log in logs:
+ if log.get("DEVICE") and log["prefix"] <= PREFIX_ERROR:
+ if not error_log_per_device.get(log["DEVICE"]):
+ error_log_per_device[log["DEVICE"]] = []
+ error_log_per_device[log["DEVICE"]].append(log)
+
+
+parse_kmsg()
+
+generate_per_device_error_log()
+num_tests = len(error_log_per_device)
+
+ksft.print_header()
+ksft.set_plan(num_tests)
+
+for device in error_log_per_device:
+ for log in error_log_per_device[device]:
+ ksft.print_msg(log["message"])
+ ksft.test_result_fail(device)
+if num_tests == 0:
+ ksft.print_msg("No device error logs found")
+ksft.finished()
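
For reference, each /dev/kmsg record starts with a 'priority,sequence,timestamp,flags;message' header line, optionally followed by continuation lines such as ' DEVICE=...'. A small sketch of how the two regular expressions above decompose one record (the sample lines are made up for illustration):

import re

RE_log = re.compile(
    r"(?P<prefix>[0-9]+),(?P<sequence>[0-9]+),(?P<timestamp>[0-9]+),"
    r"(?P<flag>[^;]*)(,[^;]*)*;(?P<message>.*)")
RE_tag = re.compile(r" (?P<key>[^=]+)=(?P<value>.*)")

sample = "3,521,1234567,-;i2c_hid_of 3-002c: failed to reset device"
tag = " DEVICE=+i2c:3-002c"

m = RE_log.match(sample)
print(int(m.group("prefix")))          # 3, at or below PREFIX_ERROR
print(m.group("message"))              # text after the ';'
print(RE_tag.match(tag).groupdict())   # {'key': 'DEVICE', 'value': '+i2c:3-002c'}

The prefix field carries the syslog priority of kernel messages, so the 'prefix <= PREFIX_ERROR' filter in generate_per_device_error_log() keeps error and more critical messages only.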
diff --git a/tools/testing/selftests/devices/probe/Makefile b/tools/testing/selftests/devices/probe/Makefile
new file mode 100644
index 000000000000..f630108c3fdf
--- /dev/null
+++ b/tools/testing/selftests/devices/probe/Makefile
@@ -0,0 +1,4 @@
+TEST_PROGS := test_discoverable_devices.py
+TEST_FILES := boards
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/devices/boards/Dell Inc.,XPS 13 9300.yaml b/tools/testing/selftests/devices/probe/boards/Dell Inc.,XPS 13 9300.yaml
index ff932eb19f0b..ff932eb19f0b 100644
--- a/tools/testing/selftests/devices/boards/Dell Inc.,XPS 13 9300.yaml
+++ b/tools/testing/selftests/devices/probe/boards/Dell Inc.,XPS 13 9300.yaml
diff --git a/tools/testing/selftests/devices/boards/google,spherion.yaml b/tools/testing/selftests/devices/probe/boards/google,spherion.yaml
index 17157ecd8c14..3ea843324797 100644
--- a/tools/testing/selftests/devices/boards/google,spherion.yaml
+++ b/tools/testing/selftests/devices/probe/boards/google,spherion.yaml
@@ -11,6 +11,10 @@
# this, several optional keys can be used:
# - dt-mmio: identify the MMIO address of the controller as defined in the
# Devicetree.
+# - of-fullname-regex: regular expression to match against the OF_FULLNAME
+# property. Useful when the controller's address is not unique across other
+# sibling controllers. In this case, dt-mmio can't be used, and this property
+# allows the matching to include parent nodes as well to make it unique.
# - usb-version: for USB controllers to differentiate between USB3 and USB2
# buses sharing the same controller.
# - acpi-uid: _UID property of the controller as supplied by the ACPI. Useful to
diff --git a/tools/testing/selftests/devices/test_discoverable_devices.py b/tools/testing/selftests/devices/probe/test_discoverable_devices.py
index fbae8deb593d..d94a74b8a054 100755
--- a/tools/testing/selftests/devices/test_discoverable_devices.py
+++ b/tools/testing/selftests/devices/probe/test_discoverable_devices.py
@@ -14,13 +14,19 @@
# the description and examples of the file structure and vocabulary.
#
+import argparse
import glob
-import ksft
import os
import re
import sys
import yaml
+# Allow ksft module to be imported from different directory
+this_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(this_dir, "../../kselftest/"))
+
+import ksft
+
pci_controllers = []
usb_controllers = []
@@ -63,6 +69,22 @@ def get_dt_mmio(sysfs_dev_dir):
sysfs_dev_dir = os.path.dirname(sysfs_dev_dir)
+def get_of_fullname(sysfs_dev_dir):
+ re_of_fullname = re.compile("OF_FULLNAME=(.*)")
+ of_fullname = None
+
+ # PCI controllers' sysfs entries don't have an of_node, so we have to read it
+ # from the parent
+ while not of_fullname:
+ try:
+ with open(os.path.join(sysfs_dev_dir, "uevent")) as f:
+ of_fullname = re_of_fullname.search(f.read()).group(1)
+ return of_fullname
+ except:
+ pass
+ sysfs_dev_dir = os.path.dirname(sysfs_dev_dir)
+
+
def get_acpi_uid(sysfs_dev_dir):
with open(os.path.join(sysfs_dev_dir, "firmware_node", "uid")) as f:
return f.read()
@@ -96,6 +118,11 @@ def find_controller_in_sysfs(controller, parent_sysfs=None):
if str(controller["dt-mmio"]) != get_dt_mmio(c):
continue
+ if controller.get("of-fullname-regex"):
+ re_of_fullname = re.compile(str(controller["of-fullname-regex"]))
+ if not re_of_fullname.match(get_of_fullname(c)):
+ continue
+
if controller.get("usb-version"):
if controller["usb-version"] != get_usb_version(c):
continue
@@ -194,6 +221,9 @@ def generate_pathname(device):
if device.get("dt-mmio"):
pathname += "@" + str(device["dt-mmio"])
+ if device.get("of-fullname-regex"):
+ pathname += "-" + str(device["of-fullname-regex"])
+
if device.get("name"):
pathname = pathname + "/" + device["name"]
@@ -296,14 +326,24 @@ def run_test(yaml_file):
parse_device_tree_node(device_tree)
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "--boards-dir", default="boards", help="Directory containing the board YAML files"
+)
+args = parser.parse_args()
+
find_pci_controller_dirs()
find_usb_controller_dirs()
ksft.print_header()
+if not os.path.exists(args.boards_dir):
+ ksft.print_msg(f"Boards directory '{args.boards_dir}' doesn't exist")
+ ksft.exit_fail()
+
board_file = ""
for board_filename in get_board_filenames():
- full_board_filename = os.path.join("boards", board_filename + ".yaml")
+ full_board_filename = os.path.join(args.boards_dir, board_filename + ".yaml")
if os.path.exists(full_board_filename):
board_file = full_board_filename
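
To illustrate the new of-fullname-regex matching (both the uevent content and the regex below are hypothetical examples, not taken from a real board file): the controller's OF_FULLNAME is pulled from its uevent, walking up parent directories if needed, and then matched against the regex from the YAML so that parent node names can disambiguate sibling controllers sharing the same unit address.

import re

uevent = "DRIVER=xhci-mtk\nOF_FULLNAME=/soc/usb@11200000/usb@11200000\n"

of_fullname = re.search("OF_FULLNAME=(.*)", uevent).group(1)
of_fullname_regex = r".*/usb@11200000/usb@11200000$"

print(bool(re.match(of_fullname_regex, of_fullname)))  # True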
diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c
index 5c997f17fcbd..b12f1f9babf8 100644
--- a/tools/testing/selftests/dma/dma_map_benchmark.c
+++ b/tools/testing/selftests/dma/dma_map_benchmark.c
@@ -33,7 +33,6 @@ int main(int argc, char **argv)
int granule = 1;
int cmd = DMA_MAP_BENCHMARK;
- char *p;
while ((opt = getopt(argc, argv, "t:s:n:b:d:x:g:")) != -1) {
switch (opt) {
diff --git a/tools/testing/selftests/drivers/dma-buf/udmabuf.c b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
index c812080e304e..6062723a172e 100644
--- a/tools/testing/selftests/drivers/dma-buf/udmabuf.c
+++ b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
@@ -9,52 +9,162 @@
#include <errno.h>
#include <fcntl.h>
#include <malloc.h>
+#include <stdbool.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
+#include <sys/mman.h>
#include <linux/memfd.h>
#include <linux/udmabuf.h>
+#include "../../kselftest.h"
#define TEST_PREFIX "drivers/dma-buf/udmabuf"
#define NUM_PAGES 4
+#define NUM_ENTRIES 4
+#define MEMFD_SIZE 1024 /* in pages */
-static int memfd_create(const char *name, unsigned int flags)
+static unsigned int page_size;
+
+static int create_memfd_with_seals(off64_t size, bool hpage)
+{
+ int memfd, ret;
+ unsigned int flags = MFD_ALLOW_SEALING;
+
+ if (hpage)
+ flags |= MFD_HUGETLB;
+
+ memfd = memfd_create("udmabuf-test", flags);
+ if (memfd < 0) {
+ ksft_print_msg("%s: [skip,no-memfd]\n", TEST_PREFIX);
+ exit(KSFT_SKIP);
+ }
+
+ ret = fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
+ if (ret < 0) {
+ ksft_print_msg("%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
+ exit(KSFT_SKIP);
+ }
+
+ ret = ftruncate(memfd, size);
+ if (ret == -1) {
+ ksft_print_msg("%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
+ exit(KSFT_FAIL);
+ }
+
+ return memfd;
+}
+
+static int create_udmabuf_list(int devfd, int memfd, off64_t memfd_size)
+{
+ struct udmabuf_create_list *list;
+ int ubuf_fd, i;
+
+ list = malloc(sizeof(struct udmabuf_create_list) +
+ sizeof(struct udmabuf_create_item) * NUM_ENTRIES);
+ if (!list) {
+ ksft_print_msg("%s: [FAIL, udmabuf-malloc]\n", TEST_PREFIX);
+ exit(KSFT_FAIL);
+ }
+
+ for (i = 0; i < NUM_ENTRIES; i++) {
+ list->list[i].memfd = memfd;
+ list->list[i].offset = i * (memfd_size / NUM_ENTRIES);
+ list->list[i].size = getpagesize() * NUM_PAGES;
+ }
+
+ list->count = NUM_ENTRIES;
+ list->flags = UDMABUF_FLAGS_CLOEXEC;
+ ubuf_fd = ioctl(devfd, UDMABUF_CREATE_LIST, list);
+ free(list);
+ if (ubuf_fd < 0) {
+ ksft_print_msg("%s: [FAIL, udmabuf-create]\n", TEST_PREFIX);
+ exit(KSFT_FAIL);
+ }
+
+ return ubuf_fd;
+}
+
+static void write_to_memfd(void *addr, off64_t size, char chr)
+{
+ int i;
+
+ for (i = 0; i < size / page_size; i++) {
+ *((char *)addr + (i * page_size)) = chr;
+ }
+}
+
+static void *mmap_fd(int fd, off64_t size)
+{
+ void *addr;
+
+ addr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if (addr == MAP_FAILED) {
+ ksft_print_msg("%s: ubuf_fd mmap fail\n", TEST_PREFIX);
+ exit(KSFT_FAIL);
+ }
+
+ return addr;
+}
+
+static int compare_chunks(void *addr1, void *addr2, off64_t memfd_size)
{
- return syscall(__NR_memfd_create, name, flags);
+ off64_t off;
+ int i = 0, j, k = 0, ret = 0;
+ char char1, char2;
+
+ while (i < NUM_ENTRIES) {
+ off = i * (memfd_size / NUM_ENTRIES);
+ for (j = 0; j < NUM_PAGES; j++, k++) {
+ char1 = *((char *)addr1 + off + (j * getpagesize()));
+ char2 = *((char *)addr2 + (k * getpagesize()));
+ if (char1 != char2) {
+ ret = -1;
+ goto err;
+ }
+ }
+ i++;
+ }
+err:
+ munmap(addr1, memfd_size);
+ munmap(addr2, NUM_ENTRIES * NUM_PAGES * getpagesize());
+ return ret;
}
int main(int argc, char *argv[])
{
struct udmabuf_create create;
int devfd, memfd, buf, ret;
- off_t size;
- void *mem;
+ off64_t size;
+ void *addr1, *addr2;
+
+ ksft_print_header();
+ ksft_set_plan(6);
devfd = open("/dev/udmabuf", O_RDWR);
if (devfd < 0) {
- printf("%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
- TEST_PREFIX);
- exit(77);
+ ksft_print_msg(
+ "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
+ TEST_PREFIX);
+ exit(KSFT_SKIP);
}
memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
if (memfd < 0) {
- printf("%s: [skip,no-memfd]\n", TEST_PREFIX);
- exit(77);
+ ksft_print_msg("%s: [skip,no-memfd]\n", TEST_PREFIX);
+ exit(KSFT_SKIP);
}
ret = fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
if (ret < 0) {
- printf("%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
- exit(77);
+ ksft_print_msg("%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
+ exit(KSFT_SKIP);
}
-
size = getpagesize() * NUM_PAGES;
ret = ftruncate(memfd, size);
if (ret == -1) {
- printf("%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
- exit(1);
+ ksft_print_msg("%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
+ exit(KSFT_FAIL);
}
memset(&create, 0, sizeof(create));
@@ -64,44 +174,86 @@ int main(int argc, char *argv[])
create.offset = getpagesize()/2;
create.size = getpagesize();
buf = ioctl(devfd, UDMABUF_CREATE, &create);
- if (buf >= 0) {
- printf("%s: [FAIL,test-1]\n", TEST_PREFIX);
- exit(1);
- }
+ if (buf >= 0)
+ ksft_test_result_fail("%s: [FAIL,test-1]\n", TEST_PREFIX);
+ else
+ ksft_test_result_pass("%s: [PASS,test-1]\n", TEST_PREFIX);
/* should fail (size not multiple of page) */
create.memfd = memfd;
create.offset = 0;
create.size = getpagesize()/2;
buf = ioctl(devfd, UDMABUF_CREATE, &create);
- if (buf >= 0) {
- printf("%s: [FAIL,test-2]\n", TEST_PREFIX);
- exit(1);
- }
+ if (buf >= 0)
+ ksft_test_result_fail("%s: [FAIL,test-2]\n", TEST_PREFIX);
+ else
+ ksft_test_result_pass("%s: [PASS,test-2]\n", TEST_PREFIX);
/* should fail (not memfd) */
create.memfd = 0; /* stdin */
create.offset = 0;
create.size = size;
buf = ioctl(devfd, UDMABUF_CREATE, &create);
- if (buf >= 0) {
- printf("%s: [FAIL,test-3]\n", TEST_PREFIX);
- exit(1);
- }
+ if (buf >= 0)
+ ksft_test_result_fail("%s: [FAIL,test-3]\n", TEST_PREFIX);
+ else
+ ksft_test_result_pass("%s: [PASS,test-3]\n", TEST_PREFIX);
/* should work */
+ page_size = getpagesize();
+ addr1 = mmap_fd(memfd, size);
+ write_to_memfd(addr1, size, 'a');
create.memfd = memfd;
create.offset = 0;
create.size = size;
buf = ioctl(devfd, UDMABUF_CREATE, &create);
- if (buf < 0) {
- printf("%s: [FAIL,test-4]\n", TEST_PREFIX);
- exit(1);
- }
+ if (buf < 0)
+ ksft_test_result_fail("%s: [FAIL,test-4]\n", TEST_PREFIX);
+ else
+ ksft_test_result_pass("%s: [PASS,test-4]\n", TEST_PREFIX);
+
+ munmap(addr1, size);
+ close(buf);
+ close(memfd);
+
+ /* should work (migration of 4k size pages)*/
+ size = MEMFD_SIZE * page_size;
+ memfd = create_memfd_with_seals(size, false);
+ addr1 = mmap_fd(memfd, size);
+ write_to_memfd(addr1, size, 'a');
+ buf = create_udmabuf_list(devfd, memfd, size);
+ addr2 = mmap_fd(buf, NUM_PAGES * NUM_ENTRIES * getpagesize());
+ write_to_memfd(addr1, size, 'b');
+ ret = compare_chunks(addr1, addr2, size);
+ if (ret < 0)
+ ksft_test_result_fail("%s: [FAIL,test-5]\n", TEST_PREFIX);
+ else
+ ksft_test_result_pass("%s: [PASS,test-5]\n", TEST_PREFIX);
+
+ close(buf);
+ close(memfd);
+
+ /* should work (migration of 2MB size huge pages)*/
+ page_size = getpagesize() * 512; /* 2 MB */
+ size = MEMFD_SIZE * page_size;
+ memfd = create_memfd_with_seals(size, true);
+ addr1 = mmap_fd(memfd, size);
+ write_to_memfd(addr1, size, 'a');
+ buf = create_udmabuf_list(devfd, memfd, size);
+ addr2 = mmap_fd(buf, NUM_PAGES * NUM_ENTRIES * getpagesize());
+ write_to_memfd(addr1, size, 'b');
+ ret = compare_chunks(addr1, addr2, size);
+ if (ret < 0)
+ ksft_test_result_fail("%s: [FAIL,test-6]\n", TEST_PREFIX);
+ else
+ ksft_test_result_pass("%s: [PASS,test-6]\n", TEST_PREFIX);
- fprintf(stderr, "%s: ok\n", TEST_PREFIX);
close(buf);
close(memfd);
close(devfd);
+
+ ksft_print_msg("%s: ok\n", TEST_PREFIX);
+ ksft_print_cnts();
+
return 0;
}
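
To make the chunk layout of the new list-create tests concrete (pure arithmetic mirroring the constants used above; the 4 KiB page size is an assumption): with MEMFD_SIZE = 1024 pages and NUM_ENTRIES = 4, the entries start 0, 256, 512 and 768 pages into the memfd, and each contributes NUM_PAGES = 4 pages, so the resulting udmabuf maps 16 pages that compare_chunks() then checks against the memfd contents.

PAGE_SIZE = 4096     # assumed 4 KiB base pages
MEMFD_SIZE = 1024    # in pages, as in the test
NUM_ENTRIES = 4
NUM_PAGES = 4

memfd_size = MEMFD_SIZE * PAGE_SIZE
offsets = [i * (memfd_size // NUM_ENTRIES) for i in range(NUM_ENTRIES)]
print([off // PAGE_SIZE for off in offsets])   # [0, 256, 512, 768]
print(NUM_ENTRIES * NUM_PAGES)                 # 16 pages mapped by the udmabuf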
diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile
index 4933d045ab66..c9f2f48fc30f 100644
--- a/tools/testing/selftests/drivers/net/hw/Makefile
+++ b/tools/testing/selftests/drivers/net/hw/Makefile
@@ -11,6 +11,7 @@ TEST_PROGS = \
hw_stats_l3_gre.sh \
loopback.sh \
pp_alloc_fail.py \
+ rss_ctx.py \
#
TEST_FILES := \
diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
new file mode 100755
index 000000000000..931dbc36ca43
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
@@ -0,0 +1,522 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import datetime
+import random
+from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ge, ksft_lt
+from lib.py import NetDrvEpEnv
+from lib.py import EthtoolFamily, NetdevFamily
+from lib.py import KsftSkipEx, KsftFailEx
+from lib.py import rand_port
+from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure
+
+
+def _rss_key_str(key):
+ return ":".join(["{:02x}".format(x) for x in key])
+
+
+def _rss_key_rand(length):
+ return [random.randint(0, 255) for _ in range(length)]
+
+
+def get_rss(cfg, context=0):
+ return ethtool(f"-x {cfg.ifname} context {context}", json=True)[0]
+
+
+def get_drop_err_sum(cfg):
+ stats = ip("-s -s link show dev " + cfg.ifname, json=True)[0]
+ cnt = 0
+ for key in ['errors', 'dropped', 'over_errors', 'fifo_errors',
+ 'length_errors', 'crc_errors', 'missed_errors',
+ 'frame_errors']:
+ cnt += stats["stats64"]["rx"][key]
+ return cnt, stats["stats64"]["tx"]["carrier_changes"]
+
+
+def ethtool_create(cfg, act, opts):
+ output = ethtool(f"{act} {cfg.ifname} {opts}").stdout
+ # Output will be something like: "New RSS context is 1" or
+ # "Added rule with ID 7", we want the integer from the end
+ return int(output.split()[-1])
+
+
+def require_ntuple(cfg):
+ features = ethtool(f"-k {cfg.ifname}", json=True)[0]
+ if not features["ntuple-filters"]["active"]:
+ # ntuple is more of a capability than a config knob, don't bother
+ # trying to enable it (until some driver actually needs it).
+ raise KsftSkipEx("Ntuple filters not enabled on the device: " + str(features["ntuple-filters"]))
+
+
+# Get Rx packet counts for all queues, as a simple list of integers
+# if @prev is specified the prev counts will be subtracted
+def _get_rx_cnts(cfg, prev=None):
+ cfg.wait_hw_stats_settle()
+ data = cfg.netdevnl.qstats_get({"ifindex": cfg.ifindex, "scope": ["queue"]}, dump=True)
+ data = [x for x in data if x['queue-type'] == "rx"]
+ max_q = max([x["queue-id"] for x in data])
+ queue_stats = [0] * (max_q + 1)
+ for q in data:
+ queue_stats[q["queue-id"]] = q["rx-packets"]
+ if prev and q["queue-id"] < len(prev):
+ queue_stats[q["queue-id"]] -= prev[q["queue-id"]]
+ return queue_stats
+
+
+def _send_traffic_check(cfg, port, name, params):
+ # params is a dict with 3 possible keys:
+ # - "target": required, which queues we expect to get iperf traffic
+ # - "empty": optional, which queues should see no traffic at all
+ # - "noise": optional, which queues we expect to see low traffic;
+ # used for queues of the main context, since some background
+ # OS activity may use those queues while we're testing
+ # the value for each is a list, or some other iterable containing queue ids.
+
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+
+ directed = sum(cnts[i] for i in params['target'])
+
+ ksft_ge(directed, 20000, f"traffic on {name}: " + str(cnts))
+ if params.get('noise'):
+ ksft_lt(sum(cnts[i] for i in params['noise']), directed / 2,
+ "traffic on other queues:" + str(cnts))
+ if params.get('empty'):
+ ksft_eq(sum(cnts[i] for i in params['empty']), 0,
+ "traffic on inactive queues: " + str(cnts))
+
+
+def test_rss_key_indir(cfg):
+ """Test basics like updating the main RSS key and indirection table."""
+
+ if len(_get_rx_cnts(cfg)) < 2:
+ KsftSkipEx("Device has only one queue (or doesn't support queue stats)")
+
+ data = get_rss(cfg)
+ want_keys = ['rss-hash-key', 'rss-hash-function', 'rss-indirection-table']
+ for k in want_keys:
+ if k not in data:
+ raise KsftFailEx("ethtool results missing key: " + k)
+ if not data[k]:
+ raise KsftFailEx(f"ethtool results empty for '{k}': {data[k]}")
+
+ key_len = len(data['rss-hash-key'])
+
+ # Set the key
+ key = _rss_key_rand(key_len)
+ ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))
+
+ data = get_rss(cfg)
+ ksft_eq(key, data['rss-hash-key'])
+
+ # Set the indirection table
+ ethtool(f"-X {cfg.ifname} equal 2")
+ reset_indir = defer(ethtool, f"-X {cfg.ifname} default")
+ data = get_rss(cfg)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(1, max(data['rss-indirection-table']))
+
+ # Check we only get traffic on the first 2 queues
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+ # 2 queues, 20k packets, must be at least 5k per queue
+ ksft_ge(cnts[0], 5000, "traffic on main context (1/2): " + str(cnts))
+ ksft_ge(cnts[1], 5000, "traffic on main context (2/2): " + str(cnts))
+ # The other queues should be unused
+ ksft_eq(sum(cnts[2:]), 0, "traffic on unused queues: " + str(cnts))
+
+ # Restore, and check traffic gets spread again
+ reset_indir.exec()
+
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+ # First two queues get less traffic than all the rest
+ ksft_lt(sum(cnts[:2]), sum(cnts[2:]), "traffic distributed: " + str(cnts))
+
+
+def test_rss_queue_reconfigure(cfg, main_ctx=True):
+ """Make sure queue changes can't override requested RSS config.
+
+ By default the main RSS table should change to include all queues.
+ When the user sets a specific RSS config the driver should preserve it,
+ even when the queue count changes. The driver should refuse to deactivate
+ queues used in the user-set RSS config.
+ """
+
+ if not main_ctx:
+ require_ntuple(cfg)
+
+ # Start with 4 queues, an arbitrary known number.
+ try:
+ qcnt = len(_get_rx_cnts(cfg))
+ ethtool(f"-L {cfg.ifname} combined 4")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test or qstat not supported")
+
+ if main_ctx:
+ ctx_id = 0
+ ctx_ref = ""
+ else:
+ ctx_id = ethtool_create(cfg, "-X", "context new")
+ ctx_ref = f"context {ctx_id}"
+ defer(ethtool, f"-X {cfg.ifname} {ctx_ref} delete")
+
+ # Indirection table should be distributing to all queues.
+ data = get_rss(cfg, context=ctx_id)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(3, max(data['rss-indirection-table']))
+
+ # Increase queues, indirection table should be distributing to all queues.
+ # It's unclear whether tables of additional contexts should be reset, too.
+ if main_ctx:
+ ethtool(f"-L {cfg.ifname} combined 5")
+ data = get_rss(cfg)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(4, max(data['rss-indirection-table']))
+ ethtool(f"-L {cfg.ifname} combined 4")
+
+ # Configure the table explicitly
+ port = rand_port()
+ ethtool(f"-X {cfg.ifname} {ctx_ref} weight 1 0 0 1")
+ if main_ctx:
+ other_key = 'empty'
+ defer(ethtool, f"-X {cfg.ifname} default")
+ else:
+ other_key = 'noise'
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
+ ntuple = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
+
+ _send_traffic_check(cfg, port, ctx_ref, { 'target': (0, 3),
+ other_key: (1, 2) })
+
+ # We should be able to increase queues, but table should be left untouched
+ ethtool(f"-L {cfg.ifname} combined 5")
+ data = get_rss(cfg, context=ctx_id)
+ ksft_eq({0, 3}, set(data['rss-indirection-table']))
+
+ _send_traffic_check(cfg, port, ctx_ref, { 'target': (0, 3),
+ other_key: (1, 2, 4) })
+
+ # Setting queue count to 3 should fail, queue 3 is used
+ try:
+ ethtool(f"-L {cfg.ifname} combined 3")
+ except CmdExitFailure:
+ pass
+ else:
+ raise Exception(f"Driver didn't prevent us from deactivating a used queue (context {ctx_id})")
+
+
+def test_rss_resize(cfg):
+ """Test resizing of the RSS table.
+
+ Some devices dynamically increase and decrease the size of the RSS
+ indirection table based on the number of enabled queues.
+ When that happens driver must maintain the balance of entries
+ (preferably duplicating the smaller table).
+ """
+
+ channels = cfg.ethnl.channels_get({'header': {'dev-index': cfg.ifindex}})
+ ch_max = channels['combined-max']
+ qcnt = channels['combined-count']
+
+ if ch_max < 2:
+ raise KsftSkipEx(f"Not enough queues for the test: {ch_max}")
+
+ ethtool(f"-L {cfg.ifname} combined 2")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+
+ ethtool(f"-X {cfg.ifname} weight 1 7")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ ethtool(f"-L {cfg.ifname} combined {ch_max}")
+ data = get_rss(cfg)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(1, max(data['rss-indirection-table']))
+
+ ksft_eq(7,
+ data['rss-indirection-table'].count(1) /
+ data['rss-indirection-table'].count(0),
+ f"Table imbalance after resize: {data['rss-indirection-table']}")
+
+
+def test_hitless_key_update(cfg):
+ """Test that flows may be rehashed without impacting traffic.
+
+ Some workloads may want to rehash the flows in response to an imbalance.
+ The most effective way to do that is to change the RSS key. Check that changing
+ the key does not cause link flaps or traffic disruption.
+
+ Disrupting traffic for key update is not a bug, but makes the key
+ update unusable for rehashing under load.
+ """
+ data = get_rss(cfg)
+ key_len = len(data['rss-hash-key'])
+
+ key = _rss_key_rand(key_len)
+
+ tgen = GenerateTraffic(cfg)
+ try:
+ errors0, carrier0 = get_drop_err_sum(cfg)
+ t0 = datetime.datetime.now()
+ ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))
+ t1 = datetime.datetime.now()
+ errors1, carrier1 = get_drop_err_sum(cfg)
+ finally:
+ tgen.wait_pkts_and_stop(5000)
+
+ ksft_lt((t1 - t0).total_seconds(), 0.2)
+ ksft_eq(errors1 - errors0, 0)
+ ksft_eq(carrier1 - carrier0, 0)
+
+
+def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
+ """
+ Test separating traffic into RSS contexts.
+ The queues will be allocated 2 for each context:
+ ctx0 ctx1 ctx2 ctx3
+ [0 1] [2 3] [4 5] [6 7] ...
+ """
+
+ require_ntuple(cfg)
+
+ requested_ctx_cnt = ctx_cnt
+
+ # Try to allocate more queues when necessary
+ qcnt = len(_get_rx_cnts(cfg))
+ if qcnt < 2 + 2 * ctx_cnt:
+ try:
+ ksft_pr(f"Increasing queue count {qcnt} -> {2 + 2 * ctx_cnt}")
+ ethtool(f"-L {cfg.ifname} combined {2 + 2 * ctx_cnt}")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test")
+
+ ports = []
+
+ # Use queues 0 and 1 for normal traffic
+ ethtool(f"-X {cfg.ifname} equal 2")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ for i in range(ctx_cnt):
+ want_cfg = f"start {2 + i * 2} equal 2"
+ create_cfg = want_cfg if create_with_cfg else ""
+
+ try:
+ ctx_id = ethtool_create(cfg, "-X", f"context new {create_cfg}")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+ except CmdExitFailure:
+ # try to carry on and skip at the end
+ if i == 0:
+ raise
+ ksft_pr(f"Failed to create context {i + 1}, trying to test what we got")
+ ctx_cnt = i
+ break
+
+ if not create_with_cfg:
+ ethtool(f"-X {cfg.ifname} context {ctx_id} {want_cfg}")
+
+ # Sanity check the context we just created
+ data = get_rss(cfg, ctx_id)
+ ksft_eq(min(data['rss-indirection-table']), 2 + i * 2, "Unexpected context cfg: " + str(data))
+ ksft_eq(max(data['rss-indirection-table']), 2 + i * 2 + 1, "Unexpected context cfg: " + str(data))
+
+ ports.append(rand_port())
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+ ntuple = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
+
+ for i in range(ctx_cnt):
+ _send_traffic_check(cfg, ports[i], f"context {i}",
+ { 'target': (2+i*2, 3+i*2),
+ 'noise': (0, 1),
+ 'empty': list(range(2, 2+i*2)) + list(range(4+i*2, 2+2*ctx_cnt)) })
+
+ if requested_ctx_cnt != ctx_cnt:
+ raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}")
+
+
+def test_rss_context4(cfg):
+ test_rss_context(cfg, 4)
+
+
+def test_rss_context32(cfg):
+ test_rss_context(cfg, 32)
+
+
+def test_rss_context4_create_with_cfg(cfg):
+ test_rss_context(cfg, 4, create_with_cfg=True)
+
+
+def test_rss_context_queue_reconfigure(cfg):
+ test_rss_queue_reconfigure(cfg, main_ctx=False)
+
+
+def test_rss_context_out_of_order(cfg, ctx_cnt=4):
+ """
+ Test separating traffic into RSS contexts.
+ Contexts are removed in semi-random order, and steering re-tested
+ to make sure removal doesn't break steering to surviving contexts.
+ Test requires 3 contexts to work.
+ """
+
+ require_ntuple(cfg)
+
+ requested_ctx_cnt = ctx_cnt
+
+ # Try to allocate more queues when necessary
+ qcnt = len(_get_rx_cnts(cfg))
+ if qcnt < 2 + 2 * ctx_cnt:
+ try:
+ ksft_pr(f"Increasing queue count {qcnt} -> {2 + 2 * ctx_cnt}")
+ ethtool(f"-L {cfg.ifname} combined {2 + 2 * ctx_cnt}")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test")
+
+ ntuple = []
+ ctx = []
+ ports = []
+
+ def remove_ctx(idx):
+ ntuple[idx].exec()
+ ntuple[idx] = None
+ ctx[idx].exec()
+ ctx[idx] = None
+
+ def check_traffic():
+ for i in range(ctx_cnt):
+ if ctx[i]:
+ expected = {
+ 'target': (2+i*2, 3+i*2),
+ 'noise': (0, 1),
+ 'empty': list(range(2, 2+i*2)) + list(range(4+i*2, 2+2*ctx_cnt))
+ }
+ else:
+ expected = {
+ 'target': (0, 1),
+ 'empty': range(2, 2+2*ctx_cnt)
+ }
+
+ _send_traffic_check(cfg, ports[i], f"context {i}", expected)
+
+ # Use queues 0 and 1 for normal traffic
+ ethtool(f"-X {cfg.ifname} equal 2")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ for i in range(ctx_cnt):
+ ctx_id = ethtool_create(cfg, "-X", f"context new start {2 + i * 2} equal 2")
+ ctx.append(defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete"))
+
+ ports.append(rand_port())
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+ ntuple_id = ethtool_create(cfg, "-N", flow)
+ ntuple.append(defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}"))
+
+ check_traffic()
+
+ # Remove middle context
+ remove_ctx(ctx_cnt // 2)
+ check_traffic()
+
+ # Remove first context
+ remove_ctx(0)
+ check_traffic()
+
+ # Remove last context
+ remove_ctx(-1)
+ check_traffic()
+
+ if requested_ctx_cnt != ctx_cnt:
+ raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}")
+
+
+def test_rss_context_overlap(cfg, other_ctx=0):
+ """
+ Test contexts overlapping with each other.
+ Use 4 queues for the main context, but only queues 2 and 3 for context 1.
+ """
+
+ require_ntuple(cfg)
+
+ queue_cnt = len(_get_rx_cnts(cfg))
+ if queue_cnt < 4:
+ try:
+ ksft_pr(f"Increasing queue count {queue_cnt} -> 4")
+ ethtool(f"-L {cfg.ifname} combined 4")
+ defer(ethtool, f"-L {cfg.ifname} combined {queue_cnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test")
+
+ if other_ctx == 0:
+ ethtool(f"-X {cfg.ifname} equal 4")
+ defer(ethtool, f"-X {cfg.ifname} default")
+ else:
+ other_ctx = ethtool_create(cfg, "-X", "context new")
+ ethtool(f"-X {cfg.ifname} context {other_ctx} equal 4")
+ defer(ethtool, f"-X {cfg.ifname} context {other_ctx} delete")
+
+ ctx_id = ethtool_create(cfg, "-X", "context new")
+ ethtool(f"-X {cfg.ifname} context {ctx_id} start 2 equal 2")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+ port = rand_port()
+ if other_ctx:
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {other_ctx}"
+ ntuple_id = ethtool_create(cfg, "-N", flow)
+ ntuple = defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
+
+ # Test the main context
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+
+ ksft_ge(sum(cnts[ :4]), 20000, "traffic on main context: " + str(cnts))
+ ksft_ge(sum(cnts[ :2]), 7000, "traffic on main context (1/2): " + str(cnts))
+ ksft_ge(sum(cnts[2:4]), 7000, "traffic on main context (2/2): " + str(cnts))
+ if other_ctx == 0:
+ ksft_eq(sum(cnts[4: ]), 0, "traffic on other queues: " + str(cnts))
+
+ # Now create a rule for context 1 and make sure traffic goes to a subset
+ if other_ctx:
+ ntuple.exec()
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
+ ntuple_id = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
+
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+
+ directed = sum(cnts[2:4])
+ ksft_lt(sum(cnts[ :2]), directed / 2, "traffic on main context: " + str(cnts))
+ ksft_ge(directed, 20000, "traffic on extra context: " + str(cnts))
+ if other_ctx == 0:
+ ksft_eq(sum(cnts[4: ]), 0, "traffic on other queues: " + str(cnts))
+
+
+def test_rss_context_overlap2(cfg):
+ test_rss_context_overlap(cfg, True)
+
+
+def main() -> None:
+ with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
+ cfg.ethnl = EthtoolFamily()
+ cfg.netdevnl = NetdevFamily()
+
+ ksft_run([test_rss_key_indir, test_rss_queue_reconfigure,
+ test_rss_resize, test_hitless_key_update,
+ test_rss_context, test_rss_context4, test_rss_context32,
+ test_rss_context_queue_reconfigure,
+ test_rss_context_overlap, test_rss_context_overlap2,
+ test_rss_context_out_of_order, test_rss_context4_create_with_cfg],
+ args=(cfg, ))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
index edcedd7bffab..a5e800b8f103 100644
--- a/tools/testing/selftests/drivers/net/lib/py/env.py
+++ b/tools/testing/selftests/drivers/net/lib/py/env.py
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
import os
+import time
from pathlib import Path
from lib.py import KsftSkipEx, KsftXfailEx
-from lib.py import cmd, ip
+from lib.py import cmd, ethtool, ip
from lib.py import NetNS, NetdevSimDev
from .remote import Remote
@@ -82,6 +83,8 @@ class NetDrvEpEnv:
self.env = _load_env_file(src_path)
+ self._stats_settle_time = None
+
# Things we try to destroy
self.remote = None
# These are for local testing state
@@ -222,3 +225,17 @@ class NetDrvEpEnv:
if remote:
if not self._require_cmd(comm, "remote"):
raise KsftSkipEx("Test requires (remote) command: " + comm)
+
+ def wait_hw_stats_settle(self):
+ """
+ Wait for HW stats to become consistent; some devices DMA HW stats
+ periodically, so events won't be reflected until the next sync.
+ Good drivers will tell us via ethtool what their sync period is.
+ """
+ if self._stats_settle_time is None:
+ data = ethtool("-c " + self.ifname, json=True)[0]
+
+ self._stats_settle_time = 0.025 + \
+ data.get('stats-block-usecs', 0) / 1000 / 1000
+
+ time.sleep(self._stats_settle_time)
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index abdb677bdb1c..d9c10613ae67 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -5,28 +5,45 @@ import time
from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen
class GenerateTraffic:
- def __init__(self, env):
+ def __init__(self, env, port=None):
env.require_cmd("iperf3", remote=True)
self.env = env
- port = rand_port()
- self._iperf_server = cmd(f"iperf3 -s -p {port}", background=True)
+ if port is None:
+ port = rand_port()
+ self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True)
wait_port_listen(port)
time.sleep(0.1)
self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400",
background=True, host=env.remote)
# Wait for traffic to ramp up
- pkt = ip("-s link show dev " + env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
+ if not self._wait_pkts(pps=1000):
+ self.stop(verbose=True)
+ raise Exception("iperf3 traffic did not ramp up")
+
+ def _wait_pkts(self, pkt_cnt=None, pps=None):
+ """
+ Wait until we've seen pkt_cnt or until traffic ramps up to pps.
+ Only one of pkt_cnt or pps can be specified.
+ """
+ pkt_start = ip("-s link show dev " + self.env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
for _ in range(50):
time.sleep(0.1)
- now = ip("-s link show dev " + env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
- if now - pkt > 1000:
- return
- pkt = now
- self.stop(verbose=True)
- raise Exception("iperf3 traffic did not ramp up")
+ pkt_now = ip("-s link show dev " + self.env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
+ if pps:
+ if pkt_now - pkt_start > pps / 10:
+ return True
+ pkt_start = pkt_now
+ elif pkt_cnt:
+ if pkt_now - pkt_start > pkt_cnt:
+ return True
+ return False
+
+ def wait_pkts_and_stop(self, pkt_cnt):
+ failed = not self._wait_pkts(pkt_cnt=pkt_cnt)
+ self.stop(verbose=failed)
def stop(self, verbose=None):
self._iperf_client.process(terminate=True)
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
index 76f1ab4898d9..e1ad623146d7 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
@@ -15,6 +15,13 @@ source $lib_dir/mirror_lib.sh
source $lib_dir/mirror_gre_lib.sh
source $lib_dir/mirror_gre_topo_lib.sh
+ALL_TESTS="
+ test_keyful
+ test_soft
+ test_tos_fixed
+ test_ttl_inherit
+"
+
setup_keyful()
{
tunnel_create gt6-key ip6gretap 2001:db8:3::1 2001:db8:3::2 \
@@ -118,15 +125,15 @@ test_span_gre_ttl_inherit()
RET=0
ip link set dev $tundev type $type ttl inherit
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
ip link set dev $tundev type $type ttl 100
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: no offload on TTL of inherit ($tcflags)"
+ log_test "$what: no offload on TTL of inherit"
}
test_span_gre_tos_fixed()
@@ -138,61 +145,49 @@ test_span_gre_tos_fixed()
RET=0
ip link set dev $tundev type $type tos 0x10
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
ip link set dev $tundev type $type tos inherit
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: no offload on a fixed TOS ($tcflags)"
+ log_test "$what: no offload on a fixed TOS"
}
test_span_failable()
{
- local should_fail=$1; shift
local tundev=$1; shift
local what=$1; shift
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- if ((should_fail)); then
- fail_test_span_gre_dir $tundev ingress
- else
- quick_test_span_gre_dir $tundev ingress
- fi
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: should_fail=$should_fail ($tcflags)"
+ log_test "fail $what"
}
-test_failable()
+test_keyful()
{
- local should_fail=$1; shift
-
- test_span_failable $should_fail gt6-key "mirror to keyful gretap"
- test_span_failable $should_fail gt6-soft "mirror to gretap w/ soft underlay"
+ test_span_failable gt6-key "mirror to keyful gretap"
}
-test_sw()
+test_soft()
{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- test_failable 0
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
+ test_span_failable gt6-soft "mirror to gretap w/ soft underlay"
}
-test_hw()
+test_tos_fixed()
{
- test_failable 1
-
test_span_gre_tos_fixed gt4 gretap "mirror to gretap"
test_span_gre_tos_fixed gt6 ip6gretap "mirror to ip6gretap"
+}
+
+test_ttl_inherit()
+{
test_span_gre_ttl_inherit gt4 gretap "mirror to gretap"
test_span_gre_ttl_inherit gt6 ip6gretap "mirror to ip6gretap"
}
@@ -202,16 +197,6 @@ trap cleanup EXIT
setup_prepare
setup_wait
-if ! tc_offload_check; then
- check_err 1 "Could not test offloaded functionality"
- log_test "mlxsw-specific tests for mirror to gretap"
- exit
-fi
-
-tcflags="skip_hw"
-test_sw
-
-tcflags="skip_sw"
-test_hw
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
index e5589e2fca85..d43093310e23 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
@@ -79,7 +79,7 @@ mirror_gre_tunnels_create()
cat >> $MIRROR_GRE_BATCH_FILE <<-EOF
filter add dev $swp1 ingress pref 1000 \
protocol ipv6 \
- flower $tcflags dst_ip $match_dip \
+ flower skip_sw dst_ip $match_dip \
action mirred egress mirror dev $tun
EOF
done
@@ -107,7 +107,7 @@ mirror_gre_tunnels_destroy()
done
}
-__mirror_gre_test()
+mirror_gre_test()
{
local count=$1; shift
local should_fail=$1; shift
@@ -131,20 +131,6 @@ __mirror_gre_test()
done
}
-mirror_gre_test()
-{
- local count=$1; shift
- local should_fail=$1; shift
-
- if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then
- check_err 1 "Could not test offloaded functionality"
- return
- fi
-
- tcflags="skip_sw"
- __mirror_gre_test $count $should_fail
-}
-
mirror_gre_setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index 31252bc8775e..4994bea5daf8 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -11,7 +11,7 @@ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
multiple_masks_test ctcam_edge_cases_test delta_simple_test \
delta_two_masks_one_key_test delta_simple_rehash_test \
bloom_simple_test bloom_complex_test bloom_delta_test \
- max_erp_entries_test max_group_size_test"
+ max_erp_entries_test max_group_size_test collision_test"
NUM_NETIFS=2
source $lib_dir/lib.sh
source $lib_dir/tc_common.sh
@@ -457,7 +457,7 @@ delta_two_masks_one_key_test()
{
# If 2 keys are the same and only differ in mask in a way that
# they belong under the same ERP (second is delta of the first),
- # there should be no C-TCAM spill.
+ # there should be a C-TCAM spill.
RET=0
@@ -474,8 +474,8 @@ delta_two_masks_one_key_test()
tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
action drop"
- tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
- check_err $? "incorrect C-TCAM spill while inserting the second rule"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 1
+ check_err $? "C-TCAM spill did not happen while inserting the second rule"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
@@ -1087,6 +1087,53 @@ max_group_size_test()
log_test "max ACL group size test ($tcflags). max size $max_size"
}
+collision_test()
+{
+ # Filters cannot share an eRP if in the common unmasked part (i.e.,
+ # without the delta bits) they have the same values. If the driver does
+ # not prevent such configuration (by spilling into the C-TCAM), then
+ # multiple entries will be present in the device with the same key,
+ # leading to collisions and a reduced scale.
+ #
+ # Create such a scenario and make sure all the filters are successfully
+ # added.
+
+ RET=0
+
+ local ret
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ # Add a single dst_ip/24 filter and multiple dst_ip/32 filters that all
+ # have the same values in the common unmasked part (dst_ip/24).
+
+ tc filter add dev $h2 ingress pref 1 proto ipv4 handle 101 \
+ flower $tcflags dst_ip 198.51.100.0/24 \
+ action drop
+
+ for i in {0..255}; do
+ tc filter add dev $h2 ingress pref 2 proto ipv4 \
+ handle $((102 + i)) \
+ flower $tcflags dst_ip 198.51.100.${i}/32 \
+ action drop
+ ret=$?
+ [[ $ret -ne 0 ]] && break
+ done
+
+ check_err $ret "failed to add all the filters"
+
+ for i in {255..0}; do
+ tc filter del dev $h2 ingress pref 2 proto ipv4 \
+ handle $((102 + i)) flower
+ done
+
+ tc filter del dev $h2 ingress pref 1 proto ipv4 handle 101 flower
+
+ log_test "collision test ($tcflags)"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
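
To spell out why the filters in collision_test() collide (illustrative arithmetic only): once the /24 mask is applied, every one of the 256 dst_ip /32 filters reduces to the same key, 198.51.100.0, so without spilling into the C-TCAM they would all land on identical entries in the device.

import ipaddress

net24 = ipaddress.ip_network("198.51.100.0/24")
keys = {int(ipaddress.ip_address("198.51.100.%d" % i)) & int(net24.netmask)
        for i in range(256)}
print(len(keys))   # 1: all 256 /32 filters share the same unmasked /24 part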
diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile b/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile
new file mode 100644
index 000000000000..03d0449d307c
--- /dev/null
+++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for ifs (In Field Scan) selftests
+
+TEST_PROGS := test_ifs.sh
+
+include ../../../../../lib.mk
diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh
new file mode 100755
index 000000000000..8b68964b29f4
--- /dev/null
+++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh
@@ -0,0 +1,494 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test the functionality of the Intel IFS (In Field Scan) driver.
+#
+
+# Matched with kselftest framework: tools/testing/selftests/kselftest.h
+readonly KSFT_PASS=0
+readonly KSFT_FAIL=1
+readonly KSFT_XFAIL=2
+readonly KSFT_SKIP=4
+
+readonly CPU_SYSFS="/sys/devices/system/cpu"
+readonly CPU_OFFLINE_SYSFS="${CPU_SYSFS}/offline"
+readonly IMG_PATH="/lib/firmware/intel/ifs_0"
+readonly IFS_SCAN_MODE="0"
+readonly IFS_ARRAY_BIST_SCAN_MODE="1"
+readonly IFS_PATH="/sys/devices/virtual/misc/intel_ifs"
+readonly IFS_SCAN_SYSFS_PATH="${IFS_PATH}_${IFS_SCAN_MODE}"
+readonly IFS_ARRAY_BIST_SYSFS_PATH="${IFS_PATH}_${IFS_ARRAY_BIST_SCAN_MODE}"
+readonly RUN_TEST="run_test"
+readonly STATUS="status"
+readonly DETAILS="details"
+readonly STATUS_PASS="pass"
+readonly PASS="PASS"
+readonly FAIL="FAIL"
+readonly INFO="INFO"
+readonly XFAIL="XFAIL"
+readonly SKIP="SKIP"
+readonly IFS_NAME="intel_ifs"
+readonly ALL="all"
+readonly SIBLINGS="siblings"
+
+# Matches arch/x86/include/asm/intel-family.h and
+# drivers/platform/x86/intel/ifs/core.c requirement as follows
+readonly SAPPHIRERAPIDS_X="8f"
+readonly EMERALDRAPIDS_X="cf"
+
+readonly INTEL_FAM6="06"
+
+LOOP_TIMES=3
+FML=""
+MODEL=""
+STEPPING=""
+CPU_FMS=""
+TRUE="true"
+FALSE="false"
+RESULT=$KSFT_PASS
+IMAGE_NAME=""
+INTERVAL_TIME=1
+OFFLINE_CPUS=""
+# For IFS cleanup tags
+ORIGIN_IFS_LOADED=""
+IFS_IMAGE_NEED_RESTORE=$FALSE
+IFS_LOG="/tmp/ifs_logs.$$"
+RANDOM_CPU=""
+DEFAULT_IMG_ID=""
+
+append_log()
+{
+ echo -e "$1" | tee -a "$IFS_LOG"
+}
+
+online_offline_cpu_list()
+{
+ local on_off=$1
+ local target_cpus=$2
+ local cpu=""
+ local cpu_start=""
+ local cpu_end=""
+ local i=""
+
+ if [[ -n "$target_cpus" ]]; then
+ for cpu in $(echo "$target_cpus" | tr ',' ' '); do
+ if [[ "$cpu" == *"-"* ]]; then
+ cpu_start=""
+ cpu_end=""
+ i=""
+ cpu_start=$(echo "$cpu" | cut -d "-" -f 1)
+ cpu_end=$(echo "$cpu" | cut -d "-" -f 2)
+ for((i=cpu_start;i<=cpu_end;i++)); do
+ append_log "[$INFO] echo $on_off > \
+${CPU_SYSFS}/cpu${i}/online"
+ echo "$on_off" > "$CPU_SYSFS"/cpu"$i"/online
+ done
+ else
+ set_target_cpu "$on_off" "$cpu"
+ fi
+ done
+ fi
+}
+
+ifs_scan_result_summary()
+{
+ local failed_info pass_num skip_num fail_num
+
+ if [[ -e "$IFS_LOG" ]]; then
+ failed_info=$(grep ^"\[${FAIL}\]" "$IFS_LOG")
+ fail_num=$(grep -c ^"\[${FAIL}\]" "$IFS_LOG")
+ skip_num=$(grep -c ^"\[${SKIP}\]" "$IFS_LOG")
+ pass_num=$(grep -c ^"\[${PASS}\]" "$IFS_LOG")
+
+ if [[ "$fail_num" -ne 0 ]]; then
+ RESULT=$KSFT_FAIL
+ echo "[$INFO] IFS test failure summary:"
+ echo "$failed_info"
+ elif [[ "$skip_num" -ne 0 ]]; then
+ RESULT=$KSFT_SKIP
+ fi
+ echo "[$INFO] IFS test pass:$pass_num, skip:$skip_num, fail:$fail_num"
+ else
+ echo "[$INFO] No file $IFS_LOG for IFS scan summary"
+ fi
+}
+
+ifs_cleanup()
+{
+ echo "[$INFO] Restore environment after IFS test"
+
+ # Restore the original ifs image if a backup of it was taken
+ [[ "$IFS_IMAGE_NEED_RESTORE" == "$TRUE" ]] && {
+ mv -f "$IMG_PATH"/"$IMAGE_NAME"_origin "$IMG_PATH"/"$IMAGE_NAME"
+ }
+
+ # Restore the CPUs to the state before testing
+ [[ -z "$OFFLINE_CPUS" ]] || online_offline_cpu_list "0" "$OFFLINE_CPUS"
+
+ lsmod | grep -q "$IFS_NAME" && [[ "$ORIGIN_IFS_LOADED" == "$FALSE" ]] && {
+ echo "[$INFO] modprobe -r $IFS_NAME"
+ modprobe -r "$IFS_NAME"
+ }
+
+ ifs_scan_result_summary
+ [[ -e "$IFS_LOG" ]] && rm -rf "$IFS_LOG"
+
+ echo "[RESULT] IFS test exit with $RESULT"
+ exit "$RESULT"
+}
+
+do_cmd()
+{
+ local cmd=$*
+ local ret=""
+
+ append_log "[$INFO] $cmd"
+ eval "$cmd"
+ ret=$?
+ if [[ $ret -ne 0 ]]; then
+ append_log "[$FAIL] $cmd failed. Return code is $ret"
+ RESULT=$KSFT_XFAIL
+ ifs_cleanup
+ fi
+}
+
+test_exit()
+{
+ local info=$1
+ RESULT=$2
+
+ declare -A EXIT_MAP
+ EXIT_MAP[$KSFT_PASS]=$PASS
+ EXIT_MAP[$KSFT_FAIL]=$FAIL
+ EXIT_MAP[$KSFT_XFAIL]=$XFAIL
+ EXIT_MAP[$KSFT_SKIP]=$SKIP
+
+ append_log "[${EXIT_MAP[$RESULT]}] $info"
+ ifs_cleanup
+}
+
+online_all_cpus()
+{
+ local off_cpus=""
+
+ OFFLINE_CPUS=$(cat "$CPU_OFFLINE_SYSFS")
+ online_offline_cpu_list "1" "$OFFLINE_CPUS"
+
+ off_cpus=$(cat "$CPU_OFFLINE_SYSFS")
+ if [[ -z "$off_cpus" ]]; then
+ append_log "[$INFO] All CPUs are online."
+ else
+ append_log "[$XFAIL] There is offline cpu:$off_cpus after online all cpu!"
+ RESULT=$KSFT_XFAIL
+ ifs_cleanup
+ fi
+}
+
+get_cpu_fms()
+{
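+	# Read family/model/stepping from /proc/cpuinfo and format each as two-digit hex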
+ FML=$(grep -m 1 "family" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}')
+ MODEL=$(grep -m 1 "model" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}')
+ STEPPING=$(grep -m 1 "stepping" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}')
+ CPU_FMS="${FML}-${MODEL}-${STEPPING}"
+}
+
+check_cpu_ifs_support_interval_time()
+{
+ get_cpu_fms
+
+ if [[ "$FML" != "$INTEL_FAM6" ]]; then
+ test_exit "CPU family:$FML does not support IFS" "$KSFT_SKIP"
+ fi
+
+	# The microcode requires a minimum interval between IFS scans on the same CPU:
+ case $MODEL in
+ "$SAPPHIRERAPIDS_X")
+ INTERVAL_TIME=180;
+ ;;
+ "$EMERALDRAPIDS_X")
+ INTERVAL_TIME=30;
+ ;;
+ *)
+ # Set default interval time for other platforms
+ INTERVAL_TIME=1;
+ append_log "[$INFO] CPU FML:$FML model:0x$MODEL, default: 1s interval time"
+ ;;
+ esac
+}
+
+check_ifs_loaded()
+{
+ local ifs_info=""
+
+ ifs_info=$(lsmod | grep "$IFS_NAME")
+ if [[ -z "$ifs_info" ]]; then
+ append_log "[$INFO] modprobe $IFS_NAME"
+ modprobe "$IFS_NAME" || {
+			test_exit "Check whether CONFIG_INTEL_IFS is set to m or the \
+platform doesn't support IFS" "$KSFT_SKIP"
+ }
+ ifs_info=$(lsmod | grep "$IFS_NAME")
+ [[ -n "$ifs_info" ]] || test_exit "No ifs module listed by lsmod" "$KSFT_FAIL"
+ fi
+}
+
+test_ifs_scan_entry()
+{
+ local ifs_info=""
+
+ ifs_info=$(lsmod | grep "$IFS_NAME")
+
+ if [[ -z "$ifs_info" ]]; then
+ ORIGIN_IFS_LOADED="$FALSE"
+ check_ifs_loaded
+ else
+ ORIGIN_IFS_LOADED="$TRUE"
+ append_log "[$INFO] Module $IFS_NAME is already loaded"
+ fi
+
+ if [[ -d "$IFS_SCAN_SYSFS_PATH" ]]; then
+ append_log "[$PASS] IFS sysfs $IFS_SCAN_SYSFS_PATH entry is created\n"
+ else
+ test_exit "No sysfs entry in $IFS_SCAN_SYSFS_PATH" "$KSFT_FAIL"
+ fi
+}
+
+load_image()
+{
+ local image_id=$1
+ local image_info=""
+ local ret=""
+
+ check_ifs_loaded
+ if [[ -e "${IMG_PATH}/${IMAGE_NAME}" ]]; then
+ append_log "[$INFO] echo 0x$image_id > ${IFS_SCAN_SYSFS_PATH}/current_batch"
+ echo "0x$image_id" > "$IFS_SCAN_SYSFS_PATH"/current_batch 2>/dev/null
+ ret=$?
+ [[ "$ret" -eq 0 ]] || {
+ append_log "[$FAIL] Load ifs image $image_id failed with ret:$ret\n"
+ return "$ret"
+ }
+ image_info=$(cat ${IFS_SCAN_SYSFS_PATH}/current_batch)
+ if [[ "$image_info" == 0x"$image_id" ]]; then
+ append_log "[$PASS] load IFS current_batch:$image_info"
+ else
+ append_log "[$FAIL] current_batch:$image_info is not expected:$image_id"
+ return "$KSFT_FAIL"
+ fi
+ else
+		append_log "[$FAIL] No IFS image file ${IMG_PATH}/${IMAGE_NAME}"
+		return "$KSFT_FAIL"
+ fi
+ return 0
+}
+
+test_load_origin_ifs_image()
+{
+ local image_id=$1
+
+ IMAGE_NAME="${CPU_FMS}-${image_id}.scan"
+
+ load_image "$image_id" || return $?
+ return 0
+}
+
+test_load_bad_ifs_image()
+{
+ local image_id=$1
+
+ IMAGE_NAME="${CPU_FMS}-${image_id}.scan"
+
+ do_cmd "mv -f ${IMG_PATH}/${IMAGE_NAME} ${IMG_PATH}/${IMAGE_NAME}_origin"
+
+	# Set IFS_IMAGE_NEED_RESTORE to true before corrupting the original ifs image file
+ IFS_IMAGE_NEED_RESTORE=$TRUE
+ do_cmd "dd if=/dev/urandom of=${IMG_PATH}/${IMAGE_NAME} bs=1K count=6 2>/dev/null"
+
+	# Negative test: loading a corrupted image is expected to fail
+ append_log "[$INFO] echo 0x$image_id > ${IFS_SCAN_SYSFS_PATH}/current_batch"
+ echo "0x$image_id" > "$IFS_SCAN_SYSFS_PATH"/current_batch 2>/dev/null
+ ret=$?
+	if [[ "$ret" -ne 0 ]]; then
+		append_log "[$PASS] Loading an invalid ifs image failed with ret:$ret as expected"
+	else
+		append_log "[$FAIL] Loading an invalid ifs image unexpectedly succeeded with ret:$ret"
+	fi
+
+ do_cmd "mv -f ${IMG_PATH}/${IMAGE_NAME}_origin ${IMG_PATH}/${IMAGE_NAME}"
+ IFS_IMAGE_NEED_RESTORE=$FALSE
+}
+
+test_bad_and_origin_ifs_image()
+{
+ local image_id=$1
+
+ append_log "[$INFO] Test loading bad and then loading original IFS image:"
+ test_load_origin_ifs_image "$image_id" || return $?
+ test_load_bad_ifs_image "$image_id"
+	# Load the original image again and make sure it still works
+ test_load_origin_ifs_image "$image_id" || return $?
+ append_log "[$INFO] Loading invalid IFS image and then loading initial image passed.\n"
+}
+
+ifs_test_cpu()
+{
+ local ifs_mode=$1
+ local cpu_num=$2
+ local image_id status details ret result result_info
+
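+	# Writing the CPU number to the $RUN_TEST file starts the test; status and details report the outcome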
+ echo "$cpu_num" > "$IFS_PATH"_"$ifs_mode"/"$RUN_TEST"
+ ret=$?
+
+ status=$(cat "${IFS_PATH}_${ifs_mode}/${STATUS}")
+ details=$(cat "${IFS_PATH}_${ifs_mode}/${DETAILS}")
+
+ if [[ "$ret" -eq 0 && "$status" == "$STATUS_PASS" ]]; then
+ result="$PASS"
+ else
+ result="$FAIL"
+ fi
+
+ cpu_num=$(cat "${CPU_SYSFS}/cpu${cpu_num}/topology/thread_siblings_list")
+
+ # There is no image file for IFS ARRAY BIST scan
+ if [[ -e "${IFS_PATH}_${ifs_mode}/current_batch" ]]; then
+ image_id=$(cat "${IFS_PATH}_${ifs_mode}/current_batch")
+ result_info=$(printf "[%s] ifs_%1d cpu(s):%s, current_batch:0x%02x, \
+ret:%2d, status:%s, details:0x%016x" \
+ "$result" "$ifs_mode" "$cpu_num" "$image_id" "$ret" \
+ "$status" "$details")
+ else
+ result_info=$(printf "[%s] ifs_%1d cpu(s):%s, ret:%2d, status:%s, details:0x%016x" \
+ "$result" "$ifs_mode" "$cpu_num" "$ret" "$status" "$details")
+ fi
+
+ append_log "$result_info"
+}
+
+ifs_test_cpus()
+{
+ local cpus_type=$1
+ local ifs_mode=$2
+ local image_id=$3
+ local cpu_max_num=""
+ local cpu_num=""
+
+ case "$cpus_type" in
+ "$ALL")
+ cpu_max_num=$(($(nproc) - 1))
+ cpus=$(seq 0 $cpu_max_num)
+ ;;
+ "$SIBLINGS")
+ cpus=$(cat ${CPU_SYSFS}/cpu*/topology/thread_siblings_list \
+ | sed -e 's/,.*//' \
+ | sed -e 's/-.*//' \
+ | sort -n \
+ | uniq)
+ ;;
+ *)
+ test_exit "Invalid cpus_type:$cpus_type" "$KSFT_XFAIL"
+ ;;
+ esac
+
+ for cpu_num in $cpus; do
+ ifs_test_cpu "$ifs_mode" "$cpu_num"
+ done
+
+ if [[ -z "$image_id" ]]; then
+ append_log "[$INFO] ifs_$ifs_mode test $cpus_type cpus completed\n"
+ else
+ append_log "[$INFO] ifs_$ifs_mode $cpus_type cpus with $CPU_FMS-$image_id.scan \
+completed\n"
+ fi
+}
+
+test_ifs_same_cpu_loop()
+{
+ local ifs_mode=$1
+ local cpu_num=$2
+ local loop_times=$3
+
+ append_log "[$INFO] Test ifs mode $ifs_mode on CPU:$cpu_num for $loop_times rounds:"
+ [[ "$ifs_mode" == "$IFS_SCAN_MODE" ]] && {
+ load_image "$DEFAULT_IMG_ID" || return $?
+ }
+ for (( i=1; i<=loop_times; i++ )); do
+ append_log "[$INFO] Loop iteration: $i in total of $loop_times"
+ # Only IFS scan needs the interval time
+ if [[ "$ifs_mode" == "$IFS_SCAN_MODE" ]]; then
+ do_cmd "sleep $INTERVAL_TIME"
+ elif [[ "$ifs_mode" == "$IFS_ARRAY_BIST_SCAN_MODE" ]]; then
+ true
+ else
+ test_exit "Invalid ifs_mode:$ifs_mode" "$KSFT_XFAIL"
+ fi
+
+ ifs_test_cpu "$ifs_mode" "$cpu_num"
+ done
+ append_log "[$INFO] $loop_times rounds of ifs_$ifs_mode test on CPU:$cpu_num completed.\n"
+}
+
+test_ifs_scan_available_imgs()
+{
+ local image_ids=""
+ local image_id=""
+
+ append_log "[$INFO] Test ifs scan with available images:"
+ image_ids=$(find "$IMG_PATH" -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \
+ 2>/dev/null \
+ | sort \
+ | awk -F "-" '{print $NF}' \
+ | cut -d "." -f 1)
+
+ for image_id in $image_ids; do
+ load_image "$image_id" || return $?
+
+ ifs_test_cpus "$SIBLINGS" "$IFS_SCAN_MODE" "$image_id"
+		# IFS scan requires an interval between scans on the same CPU
+ do_cmd "sleep $INTERVAL_TIME"
+ done
+}
+
+prepare_ifs_test_env()
+{
+ local max_cpu=""
+
+ check_cpu_ifs_support_interval_time
+
+ online_all_cpus
+ max_cpu=$(($(nproc) - 1))
+ RANDOM_CPU=$(shuf -i 0-$max_cpu -n 1)
+
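+	# Default to the lowest-numbered scan image available for this family-model-stepping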
+ DEFAULT_IMG_ID=$(find $IMG_PATH -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \
+ 2>/dev/null \
+ | sort \
+ | head -n 1 \
+ | awk -F "-" '{print $NF}' \
+ | cut -d "." -f 1)
+}
+
+test_ifs()
+{
+ prepare_ifs_test_env
+
+ test_ifs_scan_entry
+
+ if [[ -z "$DEFAULT_IMG_ID" ]]; then
+ append_log "[$SKIP] No proper ${IMG_PATH}/${CPU_FMS}-*.scan, skip ifs_0 scan"
+ else
+ test_bad_and_origin_ifs_image "$DEFAULT_IMG_ID"
+ test_ifs_scan_available_imgs
+ test_ifs_same_cpu_loop "$IFS_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES"
+ fi
+
+ if [[ -d "$IFS_ARRAY_BIST_SYSFS_PATH" ]]; then
+ ifs_test_cpus "$SIBLINGS" "$IFS_ARRAY_BIST_SCAN_MODE"
+ test_ifs_same_cpu_loop "$IFS_ARRAY_BIST_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES"
+ else
+ append_log "[$SKIP] No $IFS_ARRAY_BIST_SYSFS_PATH, skip IFS ARRAY BIST scan"
+ fi
+}
+
+trap ifs_cleanup SIGTERM SIGINT
+test_ifs
+ifs_cleanup
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index fb4472ddffd8..ba012bc5aab9 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -1,10 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS = -Wall
CFLAGS += -Wno-nonnull
-CFLAGS += -D_GNU_SOURCE
+
+ALIGNS := 0x1000 0x200000 0x1000000
+ALIGN_PIES := $(patsubst %,load_address.%,$(ALIGNS))
+ALIGN_STATIC_PIES := $(patsubst %,load_address.static.%,$(ALIGNS))
+ALIGNMENT_TESTS := $(ALIGN_PIES) $(ALIGN_STATIC_PIES)
TEST_PROGS := binfmt_script.py
-TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular
+TEST_GEN_PROGS := execveat non-regular $(ALIGNMENT_TESTS)
TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
# Makefile is a run-time dependency, since it's accessed by the execveat test
TEST_FILES := Makefile
@@ -28,9 +32,9 @@ $(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat
$(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
cp $< $@
chmod -x $@
-$(OUTPUT)/load_address_4096: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
-$(OUTPUT)/load_address_2097152: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
-$(OUTPUT)/load_address_16777216: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
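+# The max-page-size value is taken from the target name suffix, e.g. load_address.0x200000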
+$(OUTPUT)/load_address.0x%: load_address.c
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \
+ -fPIE -pie $< -o $@
+$(OUTPUT)/load_address.static.0x%: load_address.c
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \
+ -fPIE -static-pie $< -o $@
diff --git a/tools/testing/selftests/exec/load_address.c b/tools/testing/selftests/exec/load_address.c
index 17e3207d34ae..8257fddba8c8 100644
--- a/tools/testing/selftests/exec/load_address.c
+++ b/tools/testing/selftests/exec/load_address.c
@@ -5,11 +5,13 @@
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
#include "../kselftest.h"
struct Statistics {
unsigned long long load_address;
unsigned long long alignment;
+ bool interp;
};
int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
@@ -26,11 +28,20 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
stats->alignment = 0;
for (i = 0; i < info->dlpi_phnum; i++) {
+ unsigned long long align;
+
+ if (info->dlpi_phdr[i].p_type == PT_INTERP) {
+ stats->interp = true;
+ continue;
+ }
+
if (info->dlpi_phdr[i].p_type != PT_LOAD)
continue;
- if (info->dlpi_phdr[i].p_align > stats->alignment)
- stats->alignment = info->dlpi_phdr[i].p_align;
+ align = info->dlpi_phdr[i].p_align;
+
+ if (align > stats->alignment)
+ stats->alignment = align;
}
return 1; // Terminate dl_iterate_phdr.
@@ -38,27 +49,57 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
int main(int argc, char **argv)
{
- struct Statistics extracted;
- unsigned long long misalign;
+ struct Statistics extracted = { };
+ unsigned long long misalign, pow2;
+ bool interp_needed;
+ char buf[1024];
+ FILE *maps;
int ret;
ksft_print_header();
- ksft_set_plan(1);
+ ksft_set_plan(4);
+
+ /* Dump maps file for debugging reference. */
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps)
+ ksft_exit_fail_msg("FAILED: /proc/self/maps: %s\n", strerror(errno));
+ while (fgets(buf, sizeof(buf), maps)) {
+ ksft_print_msg("%s", buf);
+ }
+ fclose(maps);
+ /* Walk the program headers. */
ret = dl_iterate_phdr(ExtractStatistics, &extracted);
if (ret != 1)
ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n");
- if (extracted.alignment == 0)
- ksft_exit_fail_msg("FAILED: No alignment found\n");
- else if (extracted.alignment & (extracted.alignment - 1))
- ksft_exit_fail_msg("FAILED: Alignment is not a power of 2\n");
+ /* Report our findings. */
+ ksft_print_msg("load_address=%#llx alignment=%#llx\n",
+ extracted.load_address, extracted.alignment);
+
+ /* If we're named with ".static." we expect no INTERP. */
+ interp_needed = strstr(argv[0], ".static.") == NULL;
+
+ /* Were we built as expected? */
+ ksft_test_result(interp_needed == extracted.interp,
+ "%s INTERP program header %s\n",
+ interp_needed ? "Wanted" : "Unwanted",
+ extracted.interp ? "seen" : "missing");
+
+ /* Did we find an alignment? */
+ ksft_test_result(extracted.alignment != 0,
+ "Alignment%s found\n", extracted.alignment ? "" : " NOT");
+
+ /* Is the alignment sane? */
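+	/* x & (x - 1) is zero only when x is a power of two (or zero) */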
+ pow2 = extracted.alignment & (extracted.alignment - 1);
+ ksft_test_result(pow2 == 0,
+ "Alignment is%s a power of 2: %#llx\n",
+ pow2 == 0 ? "" : " NOT", extracted.alignment);
+ /* Is the load address aligned? */
misalign = extracted.load_address & (extracted.alignment - 1);
- if (misalign)
- ksft_exit_fail_msg("FAILED: alignment = %llu, load_address = %llu\n",
- extracted.alignment, extracted.load_address);
+ ksft_test_result(misalign == 0, "Load Address is %saligned (%#llx)\n",
+ misalign ? "MIS" : "", misalign);
- ksft_test_result_pass("Completed\n");
ksft_finished();
}
diff --git a/tools/testing/selftests/filesystems/eventfd/eventfd_test.c b/tools/testing/selftests/filesystems/eventfd/eventfd_test.c
index f142a137526c..85acb4e3ef00 100644
--- a/tools/testing/selftests/filesystems/eventfd/eventfd_test.c
+++ b/tools/testing/selftests/filesystems/eventfd/eventfd_test.c
@@ -13,6 +13,8 @@
#include <sys/eventfd.h>
#include "../../kselftest_harness.h"
+#define EVENTFD_TEST_ITERATIONS 100000UL
+
struct error {
int code;
char msg[512];
@@ -40,7 +42,7 @@ static inline int sys_eventfd2(unsigned int count, int flags)
return syscall(__NR_eventfd2, count, flags);
}
-TEST(eventfd01)
+TEST(eventfd_check_flag_rdwr)
{
int fd, flags;
@@ -54,7 +56,7 @@ TEST(eventfd01)
close(fd);
}
-TEST(eventfd02)
+TEST(eventfd_check_flag_cloexec)
{
int fd, flags;
@@ -68,7 +70,7 @@ TEST(eventfd02)
close(fd);
}
-TEST(eventfd03)
+TEST(eventfd_check_flag_nonblock)
{
int fd, flags;
@@ -83,7 +85,7 @@ TEST(eventfd03)
close(fd);
}
-TEST(eventfd04)
+TEST(eventfd_check_flag_cloexec_and_nonblock)
{
int fd, flags;
@@ -161,7 +163,7 @@ static int verify_fdinfo(int fd, struct error *err, const char *prefix,
return 0;
}
-TEST(eventfd05)
+TEST(eventfd_check_flag_semaphore)
{
struct error err = {0};
int fd, ret;
@@ -183,4 +185,128 @@ TEST(eventfd05)
close(fd);
}
+/*
+ * A write(2) fails with the error EINVAL if the size of the supplied buffer
+ * is less than 8 bytes, or if an attempt is made to write the value
+ * 0xffffffffffffffff.
+ */
+TEST(eventfd_check_write)
+{
+ uint64_t value = 1;
+ ssize_t size;
+ int fd;
+
+ fd = sys_eventfd2(0, 0);
+ ASSERT_GE(fd, 0);
+
+ size = write(fd, &value, sizeof(int));
+ EXPECT_EQ(size, -1);
+ EXPECT_EQ(errno, EINVAL);
+
+ size = write(fd, &value, sizeof(value));
+ EXPECT_EQ(size, sizeof(value));
+
+ value = (uint64_t)-1;
+ size = write(fd, &value, sizeof(value));
+ EXPECT_EQ(size, -1);
+ EXPECT_EQ(errno, EINVAL);
+
+ close(fd);
+}
+
+/*
+ * A read(2) fails with the error EINVAL if the size of the supplied buffer is
+ * less than 8 bytes.
+ */
+TEST(eventfd_check_read)
+{
+ uint64_t value;
+ ssize_t size;
+ int fd;
+
+ fd = sys_eventfd2(1, 0);
+ ASSERT_GE(fd, 0);
+
+ size = read(fd, &value, sizeof(int));
+ EXPECT_EQ(size, -1);
+ EXPECT_EQ(errno, EINVAL);
+
+ size = read(fd, &value, sizeof(value));
+ EXPECT_EQ(size, sizeof(value));
+ EXPECT_EQ(value, 1);
+
+ close(fd);
+}
+
+/*
+ * If EFD_SEMAPHORE was not specified and the eventfd counter has a nonzero
+ * value, then a read(2) returns 8 bytes containing that value, and the
+ * counter's value is reset to zero.
+ * If the eventfd counter is zero at the time of the call to read(2), then the
+ * call fails with the error EAGAIN if the file descriptor has been made nonblocking.
+ */
+TEST(eventfd_check_read_with_nonsemaphore)
+{
+ uint64_t value;
+ ssize_t size;
+ int fd;
+ int i;
+
+ fd = sys_eventfd2(0, EFD_NONBLOCK);
+ ASSERT_GE(fd, 0);
+
+ value = 1;
+ for (i = 0; i < EVENTFD_TEST_ITERATIONS; i++) {
+ size = write(fd, &value, sizeof(value));
+ EXPECT_EQ(size, sizeof(value));
+ }
+
+ size = read(fd, &value, sizeof(value));
+ EXPECT_EQ(size, sizeof(uint64_t));
+ EXPECT_EQ(value, EVENTFD_TEST_ITERATIONS);
+
+ size = read(fd, &value, sizeof(value));
+ EXPECT_EQ(size, -1);
+ EXPECT_EQ(errno, EAGAIN);
+
+ close(fd);
+}
+
+/*
+ * If EFD_SEMAPHORE was specified and the eventfd counter has a nonzero value,
+ * then a read(2) returns 8 bytes containing the value 1, and the counter's
+ * value is decremented by 1.
+ * If the eventfd counter is zero at the time of the call to read(2), then the
+ * call fails with the error EAGAIN if the file descriptor has been made nonblocking.
+ */
+TEST(eventfd_check_read_with_semaphore)
+{
+ uint64_t value;
+ ssize_t size;
+ int fd;
+ int i;
+
+ fd = sys_eventfd2(0, EFD_SEMAPHORE|EFD_NONBLOCK);
+ ASSERT_GE(fd, 0);
+
+ value = 1;
+ for (i = 0; i < EVENTFD_TEST_ITERATIONS; i++) {
+ size = write(fd, &value, sizeof(value));
+ EXPECT_EQ(size, sizeof(value));
+ }
+
+ for (i = 0; i < EVENTFD_TEST_ITERATIONS; i++) {
+ size = read(fd, &value, sizeof(value));
+ EXPECT_EQ(size, sizeof(value));
+ EXPECT_EQ(value, 1);
+ }
+
+ size = read(fd, &value, sizeof(value));
+ EXPECT_EQ(size, -1);
+ EXPECT_EQ(errno, EAGAIN);
+
+ close(fd);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/filesystems/statmount/Makefile b/tools/testing/selftests/filesystems/statmount/Makefile
index 07a0d5b545ca..3af3136e35a4 100644
--- a/tools/testing/selftests/filesystems/statmount/Makefile
+++ b/tools/testing/selftests/filesystems/statmount/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES)
-TEST_GEN_PROGS := statmount_test
+TEST_GEN_PROGS := statmount_test statmount_test_ns
include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/statmount/statmount.h b/tools/testing/selftests/filesystems/statmount/statmount.h
new file mode 100644
index 000000000000..f4294bab9d73
--- /dev/null
+++ b/tools/testing/selftests/filesystems/statmount/statmount.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __STATMOUNT_H
+#define __STATMOUNT_H
+
+#include <stdint.h>
+#include <linux/mount.h>
+#include <asm/unistd.h>
+
+static inline int statmount(uint64_t mnt_id, uint64_t mnt_ns_id, uint64_t mask,
+ struct statmount *buf, size_t bufsize,
+ unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER0,
+ .mnt_id = mnt_id,
+ .param = mask,
+ };
+
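+	/* mnt_ns_id is only understood with the larger VER1 request layout */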
+ if (mnt_ns_id) {
+ req.size = MNT_ID_REQ_SIZE_VER1;
+ req.mnt_ns_id = mnt_ns_id;
+ }
+
+ return syscall(__NR_statmount, &req, buf, bufsize, flags);
+}
+
+static ssize_t listmount(uint64_t mnt_id, uint64_t mnt_ns_id,
+ uint64_t last_mnt_id, uint64_t list[], size_t num,
+ unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER0,
+ .mnt_id = mnt_id,
+ .param = last_mnt_id,
+ };
+
+ if (mnt_ns_id) {
+ req.size = MNT_ID_REQ_SIZE_VER1;
+ req.mnt_ns_id = mnt_ns_id;
+ }
+
+ return syscall(__NR_listmount, &req, list, num, flags);
+}
+
+#endif /* __STATMOUNT_H */
diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test.c b/tools/testing/selftests/filesystems/statmount/statmount_test.c
index e8c019d72cbf..c773334bbcc9 100644
--- a/tools/testing/selftests/filesystems/statmount/statmount_test.c
+++ b/tools/testing/selftests/filesystems/statmount/statmount_test.c
@@ -4,17 +4,15 @@
#include <assert.h>
#include <stddef.h>
-#include <stdint.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/statfs.h>
-#include <linux/mount.h>
#include <linux/stat.h>
-#include <asm/unistd.h>
+#include "statmount.h"
#include "../../kselftest.h"
static const char *const known_fs[] = {
@@ -36,18 +34,6 @@ static const char *const known_fs[] = {
"ufs", "v7", "vboxsf", "vfat", "virtiofs", "vxfs", "xenfs", "xfs",
"zonefs", NULL };
-static int statmount(uint64_t mnt_id, uint64_t mask, struct statmount *buf,
- size_t bufsize, unsigned int flags)
-{
- struct mnt_id_req req = {
- .size = MNT_ID_REQ_SIZE_VER0,
- .mnt_id = mnt_id,
- .param = mask,
- };
-
- return syscall(__NR_statmount, &req, buf, bufsize, flags);
-}
-
static struct statmount *statmount_alloc(uint64_t mnt_id, uint64_t mask, unsigned int flags)
{
size_t bufsize = 1 << 15;
@@ -56,7 +42,7 @@ static struct statmount *statmount_alloc(uint64_t mnt_id, uint64_t mask, unsigne
int ret;
for (;;) {
- ret = statmount(mnt_id, mask, tmp, bufsize, flags);
+ ret = statmount(mnt_id, 0, mask, tmp, bufsize, flags);
if (ret != -1)
break;
if (tofree)
@@ -121,7 +107,7 @@ static char root_mntpoint[] = "/tmp/statmount_test_root.XXXXXX";
static int orig_root;
static uint64_t root_id, parent_id;
static uint32_t old_root_id, old_parent_id;
-
+static FILE *f_mountinfo;
static void cleanup_namespace(void)
{
@@ -146,7 +132,7 @@ static void setup_namespace(void)
uid_t uid = getuid();
gid_t gid = getgid();
- ret = unshare(CLONE_NEWNS|CLONE_NEWUSER);
+ ret = unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWPID);
if (ret == -1)
ksft_exit_fail_msg("unsharing mountns and userns: %s\n",
strerror(errno));
@@ -157,6 +143,11 @@ static void setup_namespace(void)
sprintf(buf, "0 %d 1", gid);
write_file("/proc/self/gid_map", buf);
+ f_mountinfo = fopen("/proc/self/mountinfo", "re");
+ if (!f_mountinfo)
+ ksft_exit_fail_msg("failed to open mountinfo: %s\n",
+ strerror(errno));
+
ret = mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL);
if (ret == -1)
ksft_exit_fail_msg("making mount tree private: %s\n",
@@ -216,25 +207,13 @@ static int setup_mount_tree(int log2_num)
return 0;
}
-static ssize_t listmount(uint64_t mnt_id, uint64_t last_mnt_id,
- uint64_t list[], size_t num, unsigned int flags)
-{
- struct mnt_id_req req = {
- .size = MNT_ID_REQ_SIZE_VER0,
- .mnt_id = mnt_id,
- .param = last_mnt_id,
- };
-
- return syscall(__NR_listmount, &req, list, num, flags);
-}
-
static void test_listmount_empty_root(void)
{
ssize_t res;
const unsigned int size = 32;
uint64_t list[size];
- res = listmount(LSMT_ROOT, 0, list, size, 0);
+ res = listmount(LSMT_ROOT, 0, 0, list, size, 0);
if (res == -1) {
ksft_test_result_fail("listmount: %s\n", strerror(errno));
return;
@@ -259,7 +238,7 @@ static void test_statmount_zero_mask(void)
struct statmount sm;
int ret;
- ret = statmount(root_id, 0, &sm, sizeof(sm), 0);
+ ret = statmount(root_id, 0, 0, &sm, sizeof(sm), 0);
if (ret == -1) {
ksft_test_result_fail("statmount zero mask: %s\n",
strerror(errno));
@@ -285,7 +264,7 @@ static void test_statmount_mnt_basic(void)
int ret;
uint64_t mask = STATMOUNT_MNT_BASIC;
- ret = statmount(root_id, mask, &sm, sizeof(sm), 0);
+ ret = statmount(root_id, 0, mask, &sm, sizeof(sm), 0);
if (ret == -1) {
ksft_test_result_fail("statmount mnt basic: %s\n",
strerror(errno));
@@ -345,7 +324,7 @@ static void test_statmount_sb_basic(void)
struct statx sx;
struct statfs sf;
- ret = statmount(root_id, mask, &sm, sizeof(sm), 0);
+ ret = statmount(root_id, 0, mask, &sm, sizeof(sm), 0);
if (ret == -1) {
ksft_test_result_fail("statmount sb basic: %s\n",
strerror(errno));
@@ -470,6 +449,88 @@ static void test_statmount_fs_type(void)
free(sm);
}
+static void test_statmount_mnt_opts(void)
+{
+ struct statmount *sm;
+ const char *statmount_opts;
+ char *line = NULL;
+ size_t len = 0;
+
+ sm = statmount_alloc(root_id, STATMOUNT_MNT_BASIC | STATMOUNT_MNT_OPTS,
+ 0);
+ if (!sm) {
+ ksft_test_result_fail("statmount mnt opts: %s\n",
+ strerror(errno));
+ return;
+ }
+
+ while (getline(&line, &len, f_mountinfo) != -1) {
+ int i;
+ char *p, *p2;
+ unsigned int old_mnt_id;
+
+ old_mnt_id = atoi(line);
+ if (old_mnt_id != sm->mnt_id_old)
+ continue;
+
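+		/*
+		 * mountinfo fields: id parent major:minor root mountpoint
+		 * mnt_opts [optional...] - fstype source sb_opts; walk to
+		 * the superblock options to compare them with statmount().
+		 */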
+ for (p = line, i = 0; p && i < 5; i++)
+ p = strchr(p + 1, ' ');
+ if (!p)
+ continue;
+
+ p2 = strchr(p + 1, ' ');
+ if (!p2)
+ continue;
+ *p2 = '\0';
+ p = strchr(p2 + 1, '-');
+ if (!p)
+ continue;
+ for (p++, i = 0; p && i < 2; i++)
+ p = strchr(p + 1, ' ');
+ if (!p)
+ continue;
+ p++;
+
+ /* skip generic superblock options */
+ if (strncmp(p, "ro", 2) == 0)
+ p += 2;
+ else if (strncmp(p, "rw", 2) == 0)
+ p += 2;
+ if (*p == ',')
+ p++;
+ if (strncmp(p, "sync", 4) == 0)
+ p += 4;
+ if (*p == ',')
+ p++;
+ if (strncmp(p, "dirsync", 7) == 0)
+ p += 7;
+ if (*p == ',')
+ p++;
+ if (strncmp(p, "lazytime", 8) == 0)
+ p += 8;
+ if (*p == ',')
+ p++;
+ p2 = strrchr(p, '\n');
+ if (p2)
+ *p2 = '\0';
+
+ statmount_opts = sm->str + sm->mnt_opts;
+ if (strcmp(statmount_opts, p) != 0)
+ ksft_test_result_fail(
+ "unexpected mount options: '%s' != '%s'\n",
+ statmount_opts, p);
+ else
+ ksft_test_result_pass("statmount mount options\n");
+ free(sm);
+ free(line);
+ return;
+ }
+
+	ksft_test_result_fail("didn't find mount entry\n");
+ free(sm);
+ free(line);
+}
+
static void test_statmount_string(uint64_t mask, size_t off, const char *name)
{
struct statmount *sm;
@@ -506,14 +567,14 @@ static void test_statmount_string(uint64_t mask, size_t off, const char *name)
exactsize = sm->size;
shortsize = sizeof(*sm) + i;
- ret = statmount(root_id, mask, sm, exactsize, 0);
+ ret = statmount(root_id, 0, mask, sm, exactsize, 0);
if (ret == -1) {
ksft_test_result_fail("statmount exact size: %s\n",
strerror(errno));
goto out;
}
errno = 0;
- ret = statmount(root_id, mask, sm, shortsize, 0);
+ ret = statmount(root_id, 0, mask, sm, shortsize, 0);
if (ret != -1 || errno != EOVERFLOW) {
ksft_test_result_fail("should have failed with EOVERFLOW: %s\n",
strerror(errno));
@@ -541,7 +602,7 @@ static void test_listmount_tree(void)
if (res == -1)
return;
- num = res = listmount(LSMT_ROOT, 0, list, size, 0);
+ num = res = listmount(LSMT_ROOT, 0, 0, list, size, 0);
if (res == -1) {
ksft_test_result_fail("listmount: %s\n", strerror(errno));
return;
@@ -553,7 +614,7 @@ static void test_listmount_tree(void)
}
for (i = 0; i < size - step;) {
- res = listmount(LSMT_ROOT, i ? list2[i - 1] : 0, list2 + i, step, 0);
+ res = listmount(LSMT_ROOT, 0, i ? list2[i - 1] : 0, list2 + i, step, 0);
if (res == -1)
ksft_test_result_fail("short listmount: %s\n",
strerror(errno));
@@ -585,18 +646,18 @@ int main(void)
int ret;
uint64_t all_mask = STATMOUNT_SB_BASIC | STATMOUNT_MNT_BASIC |
STATMOUNT_PROPAGATE_FROM | STATMOUNT_MNT_ROOT |
- STATMOUNT_MNT_POINT | STATMOUNT_FS_TYPE;
+ STATMOUNT_MNT_POINT | STATMOUNT_FS_TYPE | STATMOUNT_MNT_NS_ID;
ksft_print_header();
- ret = statmount(0, 0, NULL, 0, 0);
+ ret = statmount(0, 0, 0, NULL, 0, 0);
assert(ret == -1);
if (errno == ENOSYS)
ksft_exit_skip("statmount() syscall not supported\n");
setup_namespace();
- ksft_set_plan(14);
+ ksft_set_plan(15);
test_listmount_empty_root();
test_statmount_zero_mask();
test_statmount_mnt_basic();
@@ -604,6 +665,7 @@ int main(void)
test_statmount_mnt_root();
test_statmount_mnt_point();
test_statmount_fs_type();
+ test_statmount_mnt_opts();
test_statmount_string(STATMOUNT_MNT_ROOT, str_off(mnt_root), "mount root");
test_statmount_string(STATMOUNT_MNT_POINT, str_off(mnt_point), "mount point");
test_statmount_string(STATMOUNT_FS_TYPE, str_off(fs_type), "fs type");
diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c b/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c
new file mode 100644
index 000000000000..e044f5fc57fd
--- /dev/null
+++ b/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <linux/nsfs.h>
+#include <linux/stat.h>
+
+#include "statmount.h"
+#include "../../kselftest.h"
+
+#define NSID_PASS 0
+#define NSID_FAIL 1
+#define NSID_SKIP 2
+#define NSID_ERROR 3
+
+static void handle_result(int ret, const char *testname)
+{
+ if (ret == NSID_PASS)
+ ksft_test_result_pass("%s\n", testname);
+ else if (ret == NSID_FAIL)
+ ksft_test_result_fail("%s\n", testname);
+ else if (ret == NSID_ERROR)
+ ksft_exit_fail_msg("%s\n", testname);
+ else
+ ksft_test_result_skip("%s\n", testname);
+}
+
+static inline int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ ksft_print_msg("waitpid returned -1, errno=%d\n", errno);
+ return -1;
+ }
+
+ if (!WIFEXITED(status)) {
+ ksft_print_msg(
+ "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n",
+ WIFSIGNALED(status), WTERMSIG(status));
+ return -1;
+ }
+
+ ret = WEXITSTATUS(status);
+ return ret;
+}
+
+static int get_mnt_ns_id(const char *mnt_ns, uint64_t *mnt_ns_id)
+{
+ int fd = open(mnt_ns, O_RDONLY);
+
+ if (fd < 0) {
+ ksft_print_msg("failed to open for ns %s: %s\n",
+ mnt_ns, strerror(errno));
+ sleep(60);
+ return NSID_ERROR;
+ }
+
+ if (ioctl(fd, NS_GET_MNTNS_ID, mnt_ns_id) < 0) {
+ ksft_print_msg("failed to get the nsid for ns %s: %s\n",
+ mnt_ns, strerror(errno));
+ return NSID_ERROR;
+ }
+ close(fd);
+ return NSID_PASS;
+}
+
+static int get_mnt_id(const char *path, uint64_t *mnt_id)
+{
+ struct statx sx;
+ int ret;
+
+ ret = statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &sx);
+ if (ret == -1) {
+ ksft_print_msg("retrieving unique mount ID for %s: %s\n", path,
+ strerror(errno));
+ return NSID_ERROR;
+ }
+
+ if (!(sx.stx_mask & STATX_MNT_ID_UNIQUE)) {
+ ksft_print_msg("no unique mount ID available for %s\n", path);
+ return NSID_ERROR;
+ }
+
+ *mnt_id = sx.stx_mnt_id;
+ return NSID_PASS;
+}
+
+static int write_file(const char *path, const char *val)
+{
+ int fd = open(path, O_WRONLY);
+ size_t len = strlen(val);
+ int ret;
+
+ if (fd == -1) {
+ ksft_print_msg("opening %s for write: %s\n", path, strerror(errno));
+ return NSID_ERROR;
+ }
+
+ ret = write(fd, val, len);
+ if (ret == -1) {
+ ksft_print_msg("writing to %s: %s\n", path, strerror(errno));
+ return NSID_ERROR;
+ }
+ if (ret != len) {
+ ksft_print_msg("short write to %s\n", path);
+ return NSID_ERROR;
+ }
+
+ ret = close(fd);
+ if (ret == -1) {
+ ksft_print_msg("closing %s\n", path);
+ return NSID_ERROR;
+ }
+
+ return NSID_PASS;
+}
+
+static int setup_namespace(void)
+{
+ int ret;
+ char buf[32];
+ uid_t uid = getuid();
+ gid_t gid = getgid();
+
+ ret = unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWPID);
+ if (ret == -1)
+ ksft_exit_fail_msg("unsharing mountns and userns: %s\n",
+ strerror(errno));
+
+ sprintf(buf, "0 %d 1", uid);
+ ret = write_file("/proc/self/uid_map", buf);
+ if (ret != NSID_PASS)
+ return ret;
+ ret = write_file("/proc/self/setgroups", "deny");
+ if (ret != NSID_PASS)
+ return ret;
+ sprintf(buf, "0 %d 1", gid);
+ ret = write_file("/proc/self/gid_map", buf);
+ if (ret != NSID_PASS)
+ return ret;
+
+ ret = mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL);
+ if (ret == -1) {
+ ksft_print_msg("making mount tree private: %s\n",
+ strerror(errno));
+ return NSID_ERROR;
+ }
+
+ return NSID_PASS;
+}
+
+static int _test_statmount_mnt_ns_id(void)
+{
+ struct statmount sm;
+ uint64_t mnt_ns_id;
+ uint64_t root_id;
+ int ret;
+
+ ret = get_mnt_ns_id("/proc/self/ns/mnt", &mnt_ns_id);
+ if (ret != NSID_PASS)
+ return ret;
+
+ ret = get_mnt_id("/", &root_id);
+ if (ret != NSID_PASS)
+ return ret;
+
+ ret = statmount(root_id, 0, STATMOUNT_MNT_NS_ID, &sm, sizeof(sm), 0);
+ if (ret == -1) {
+ ksft_print_msg("statmount mnt ns id: %s\n", strerror(errno));
+ return NSID_ERROR;
+ }
+
+ if (sm.size != sizeof(sm)) {
+ ksft_print_msg("unexpected size: %u != %u\n", sm.size,
+ (uint32_t)sizeof(sm));
+ return NSID_FAIL;
+ }
+ if (sm.mask != STATMOUNT_MNT_NS_ID) {
+ ksft_print_msg("statmount mnt ns id unavailable\n");
+ return NSID_SKIP;
+ }
+
+ if (sm.mnt_ns_id != mnt_ns_id) {
+ ksft_print_msg("unexpected mnt ns ID: 0x%llx != 0x%llx\n",
+ (unsigned long long)sm.mnt_ns_id,
+ (unsigned long long)mnt_ns_id);
+ return NSID_FAIL;
+ }
+
+ return NSID_PASS;
+}
+
+static void test_statmount_mnt_ns_id(void)
+{
+ pid_t pid;
+ int ret;
+
+ pid = fork();
+ if (pid < 0)
+ ksft_exit_fail_msg("failed to fork: %s\n", strerror(errno));
+
+ /* We're the original pid, wait for the result. */
+ if (pid != 0) {
+ ret = wait_for_pid(pid);
+ handle_result(ret, "test statmount ns id");
+ return;
+ }
+
+ ret = setup_namespace();
+ if (ret != NSID_PASS)
+ exit(ret);
+ ret = _test_statmount_mnt_ns_id();
+ exit(ret);
+}
+
+static int validate_external_listmount(pid_t pid, uint64_t child_nr_mounts)
+{
+ uint64_t list[256];
+ uint64_t mnt_ns_id;
+ uint64_t nr_mounts;
+ char buf[256];
+ int ret;
+
+ /* Get the mount ns id for our child. */
+ snprintf(buf, sizeof(buf), "/proc/%lu/ns/mnt", (unsigned long)pid);
+	ret = get_mnt_ns_id(buf, &mnt_ns_id);
+	if (ret != NSID_PASS)
+		return ret;
+
+ nr_mounts = listmount(LSMT_ROOT, mnt_ns_id, 0, list, 256, 0);
+ if (nr_mounts == (uint64_t)-1) {
+ ksft_print_msg("listmount: %s\n", strerror(errno));
+ return NSID_ERROR;
+ }
+
+ if (nr_mounts != child_nr_mounts) {
+		ksft_print_msg("listmount result is %zi != %zi\n", nr_mounts,
+			       child_nr_mounts);
+ return NSID_FAIL;
+ }
+
+ /* Validate that all of our entries match our mnt_ns_id. */
+ for (int i = 0; i < nr_mounts; i++) {
+ struct statmount sm;
+
+ ret = statmount(list[i], mnt_ns_id, STATMOUNT_MNT_NS_ID, &sm,
+ sizeof(sm), 0);
+ if (ret < 0) {
+ ksft_print_msg("statmount mnt ns id: %s\n", strerror(errno));
+ return NSID_ERROR;
+ }
+
+ if (sm.mask != STATMOUNT_MNT_NS_ID) {
+ ksft_print_msg("statmount mnt ns id unavailable\n");
+ return NSID_SKIP;
+ }
+
+ if (sm.mnt_ns_id != mnt_ns_id) {
+ ksft_print_msg("listmount gave us the wrong ns id: 0x%llx != 0x%llx\n",
+ (unsigned long long)sm.mnt_ns_id,
+ (unsigned long long)mnt_ns_id);
+ return NSID_FAIL;
+ }
+ }
+
+ return NSID_PASS;
+}
+
+static void test_listmount_ns(void)
+{
+ uint64_t nr_mounts;
+ char pval;
+ int child_ready_pipe[2];
+ int parent_ready_pipe[2];
+ pid_t pid;
+ int ret, child_ret;
+
+ if (pipe(child_ready_pipe) < 0)
+ ksft_exit_fail_msg("failed to create the child pipe: %s\n",
+ strerror(errno));
+ if (pipe(parent_ready_pipe) < 0)
+ ksft_exit_fail_msg("failed to create the parent pipe: %s\n",
+ strerror(errno));
+
+ pid = fork();
+ if (pid < 0)
+ ksft_exit_fail_msg("failed to fork: %s\n", strerror(errno));
+
+ if (pid == 0) {
+ char cval;
+ uint64_t list[256];
+
+ close(child_ready_pipe[0]);
+ close(parent_ready_pipe[1]);
+
+ ret = setup_namespace();
+ if (ret != NSID_PASS)
+ exit(ret);
+
+ nr_mounts = listmount(LSMT_ROOT, 0, 0, list, 256, 0);
+ if (nr_mounts == (uint64_t)-1) {
+ ksft_print_msg("listmount: %s\n", strerror(errno));
+ exit(NSID_FAIL);
+ }
+
+ /*
+ * Tell our parent how many mounts we have, and then wait for it
+ * to tell us we're done.
+ */
+ write(child_ready_pipe[1], &nr_mounts, sizeof(nr_mounts));
+ read(parent_ready_pipe[0], &cval, sizeof(cval));
+ exit(NSID_PASS);
+ }
+
+ close(child_ready_pipe[1]);
+ close(parent_ready_pipe[0]);
+
+ /* Wait until the child has created everything. */
+ if (read(child_ready_pipe[0], &nr_mounts, sizeof(nr_mounts)) !=
+ sizeof(nr_mounts))
+ ret = NSID_ERROR;
+
+ ret = validate_external_listmount(pid, nr_mounts);
+
+ if (write(parent_ready_pipe[1], &pval, sizeof(pval)) != sizeof(pval))
+ ret = NSID_ERROR;
+
+ child_ret = wait_for_pid(pid);
+ if (child_ret != NSID_PASS)
+ ret = child_ret;
+ handle_result(ret, "test listmount ns id");
+}
+
+int main(void)
+{
+ int ret;
+
+ ksft_print_header();
+ ret = statmount(0, 0, 0, NULL, 0, 0);
+ assert(ret == -1);
+ if (errno == ENOSYS)
+ ksft_exit_skip("statmount() syscall not supported\n");
+
+ ksft_set_plan(2);
+ test_statmount_mnt_ns_id();
+ test_listmount_ns();
+
+ if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
+ ksft_exit_fail();
+ else
+ ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc
new file mode 100644
index 000000000000..ff88f97e41fb
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc
@@ -0,0 +1,103 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: ftrace - function graph filters
+# requires: set_ftrace_filter function_graph:tracer
+
+# Make sure that function graph filtering works
+
+INSTANCE1="instances/test1_$$"
+INSTANCE2="instances/test2_$$"
+INSTANCE3="instances/test3_$$"
+
+WD=`pwd`
+
+do_reset() {
+ cd $WD
+ if [ -d $INSTANCE1 ]; then
+ echo nop > $INSTANCE1/current_tracer
+ rmdir $INSTANCE1
+ fi
+ if [ -d $INSTANCE2 ]; then
+ echo nop > $INSTANCE2/current_tracer
+ rmdir $INSTANCE2
+ fi
+ if [ -d $INSTANCE3 ]; then
+ echo nop > $INSTANCE3/current_tracer
+ rmdir $INSTANCE3
+ fi
+}
+
+mkdir $INSTANCE1
+if ! grep -q function_graph $INSTANCE1/available_tracers; then
+ echo "function_graph not allowed with instances"
+ rmdir $INSTANCE1
+ exit_unsupported
+fi
+
+mkdir $INSTANCE2
+mkdir $INSTANCE3
+
+fail() { # msg
+ do_reset
+ echo $1
+ exit_fail
+}
+
+disable_tracing
+clear_trace
+
+do_test() {
+ REGEX=$1
+ TEST=$2
+
+ # filter something, schedule is always good
+ if ! echo "$REGEX" > set_ftrace_filter; then
+ fail "can not enable filter $REGEX"
+ fi
+
+ echo > trace
+ echo function_graph > current_tracer
+ enable_tracing
+ sleep 1
+ # search for functions (has "{" or ";" on the line)
+ echo 0 > tracing_on
+ count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep -v "$TEST" | wc -l`
+ echo 1 > tracing_on
+ if [ $count -ne 0 ]; then
+ fail "Graph filtering not working by itself against $TEST?"
+ fi
+
+ # Make sure we did find something
+ echo 0 > tracing_on
+ count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep "$TEST" | wc -l`
+ echo 1 > tracing_on
+ if [ $count -eq 0 ]; then
+ fail "No traces found with $TEST?"
+ fi
+}
+
+do_test '*sched*' 'sched'
+cd $INSTANCE1
+do_test '*lock*' 'lock'
+cd $WD
+cd $INSTANCE2
+do_test '*rcu*' 'rcu'
+cd $WD
+cd $INSTANCE3
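+# Third instance traces with no filter; $TEST still holds "rcu" from the last do_test call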
+echo function_graph > current_tracer
+
+sleep 1
+count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep "$TEST" | wc -l`
+if [ $count -eq 0 ]; then
+ fail "No traces found with all tracing?"
+fi
+
+cd $WD
+echo nop > current_tracer
+echo nop > $INSTANCE1/current_tracer
+echo nop > $INSTANCE2/current_tracer
+echo nop > $INSTANCE3/current_tracer
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
index 2f7211254529..8dcce001881d 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
@@ -8,12 +8,18 @@
# Also test it on an instance directory
do_function_fork=1
+do_funcgraph_proc=1
if [ ! -f options/function-fork ]; then
do_function_fork=0
echo "no option for function-fork found. Option will not be tested."
fi
+if [ ! -f options/funcgraph-proc ]; then
+ do_funcgraph_proc=0
+	echo "no option for funcgraph-proc found. Option will not be tested."
+fi
+
read PID _ < /proc/self/stat
if [ $do_function_fork -eq 1 ]; then
@@ -21,12 +27,19 @@ if [ $do_function_fork -eq 1 ]; then
orig_value=`grep function-fork trace_options`
fi
+if [ $do_funcgraph_proc -eq 1 ]; then
+ orig_value2=`cat options/funcgraph-proc`
+ echo 1 > options/funcgraph-proc
+fi
+
do_reset() {
- if [ $do_function_fork -eq 0 ]; then
- return
+ if [ $do_function_fork -eq 1 ]; then
+ echo $orig_value > trace_options
fi
- echo $orig_value > trace_options
+ if [ $do_funcgraph_proc -eq 1 ]; then
+ echo $orig_value2 > options/funcgraph-proc
+ fi
}
fail() { # msg
@@ -36,13 +49,15 @@ fail() { # msg
}
do_test() {
+ TRACER=$1
+
disable_tracing
echo do_execve* > set_ftrace_filter
echo $FUNCTION_FORK >> set_ftrace_filter
echo $PID > set_ftrace_pid
- echo function > current_tracer
+ echo $TRACER > current_tracer
if [ $do_function_fork -eq 1 ]; then
# don't allow children to be traced
@@ -82,7 +97,11 @@ do_test() {
fi
}
-do_test
+do_test function
+if grep -s function_graph available_tracers; then
+ do_test function_graph
+fi
+
do_reset
exit 0
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 994fa3468f17..f79f9bac7918 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
-CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE= -pthread $(INCLUDES) $(KHDR_INCLUDES)
+CFLAGS := $(CFLAGS) -g -O2 -Wall -pthread $(INCLUDES) $(KHDR_INCLUDES)
LDLIBS := -lpthread -lrt
LOCAL_HDRS := \
diff --git a/tools/testing/selftests/hid/hid_bpf.c b/tools/testing/selftests/hid/hid_bpf.c
index f825623e3edc..dc0408a831d0 100644
--- a/tools/testing/selftests/hid/hid_bpf.c
+++ b/tools/testing/selftests/hid/hid_bpf.c
@@ -460,7 +460,7 @@ FIXTURE(hid_bpf) {
int hid_id;
pthread_t tid;
struct hid *skel;
- int hid_links[3]; /* max number of programs loaded in a single test */
+ struct bpf_link *hid_links[3]; /* max number of programs loaded in a single test */
};
static void detach_bpf(FIXTURE_DATA(hid_bpf) * self)
{
@@ -470,9 +470,14 @@ static void detach_bpf(FIXTURE_DATA(hid_bpf) * self)
close(self->hidraw_fd);
self->hidraw_fd = 0;
+ if (!self->skel)
+ return;
+
+ hid__detach(self->skel);
+
for (i = 0; i < ARRAY_SIZE(self->hid_links); i++) {
if (self->hid_links[i])
- close(self->hid_links[i]);
+ bpf_link__destroy(self->hid_links[i]);
}
hid__destroy(self->skel);
@@ -527,14 +532,7 @@ static void load_programs(const struct test_program programs[],
FIXTURE_DATA(hid_bpf) * self,
const FIXTURE_VARIANT(hid_bpf) * variant)
{
- int attach_fd, err = -EINVAL;
- struct attach_prog_args args = {
- .retval = -1,
- };
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
- .ctx_in = &args,
- .ctx_size_in = sizeof(args),
- );
+ int err = -EINVAL;
ASSERT_LE(progs_count, ARRAY_SIZE(self->hid_links))
TH_LOG("too many programs are to be loaded");
@@ -545,37 +543,45 @@ static void load_programs(const struct test_program programs[],
for (int i = 0; i < progs_count; i++) {
struct bpf_program *prog;
+ struct bpf_map *map;
+ int *ops_hid_id;
prog = bpf_object__find_program_by_name(*self->skel->skeleton->obj,
programs[i].name);
ASSERT_OK_PTR(prog) TH_LOG("can not find program by name '%s'", programs[i].name);
bpf_program__set_autoload(prog, true);
+
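+		/* the struct_ops map is named after the program minus its "hid_" prefix */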
+ map = bpf_object__find_map_by_name(*self->skel->skeleton->obj,
+ programs[i].name + 4);
+ ASSERT_OK_PTR(map) TH_LOG("can not find struct_ops by name '%s'",
+ programs[i].name + 4);
+
+ /* hid_id is the first field of struct hid_bpf_ops */
+ ops_hid_id = bpf_map__initial_value(map, NULL);
+ ASSERT_OK_PTR(ops_hid_id) TH_LOG("unable to retrieve struct_ops data");
+
+ *ops_hid_id = self->hid_id;
}
err = hid__load(self->skel);
ASSERT_OK(err) TH_LOG("hid_skel_load failed: %d", err);
- attach_fd = bpf_program__fd(self->skel->progs.attach_prog);
- ASSERT_GE(attach_fd, 0) TH_LOG("locate attach_prog: %d", attach_fd);
-
for (int i = 0; i < progs_count; i++) {
- struct bpf_program *prog;
+ struct bpf_map *map;
- prog = bpf_object__find_program_by_name(*self->skel->skeleton->obj,
- programs[i].name);
- ASSERT_OK_PTR(prog) TH_LOG("can not find program by name '%s'", programs[i].name);
-
- args.prog_fd = bpf_program__fd(prog);
- args.hid = self->hid_id;
- args.insert_head = programs[i].insert_head;
- err = bpf_prog_test_run_opts(attach_fd, &tattr);
- ASSERT_GE(args.retval, 0)
- TH_LOG("attach_hid(%s): %d", programs[i].name, args.retval);
+ map = bpf_object__find_map_by_name(*self->skel->skeleton->obj,
+ programs[i].name + 4);
+ ASSERT_OK_PTR(map) TH_LOG("can not find struct_ops by name '%s'",
+ programs[i].name + 4);
- self->hid_links[i] = args.retval;
+ self->hid_links[i] = bpf_map__attach_struct_ops(map);
+ ASSERT_OK_PTR(self->hid_links[i]) TH_LOG("failed to attach struct ops '%s'",
+ programs[i].name + 4);
}
+ hid__attach(self->skel);
+
self->hidraw_fd = open_hidraw(self->dev_id);
ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
}
@@ -640,6 +646,47 @@ TEST_F(hid_bpf, raw_event)
}
/*
+ * Attach hid_first_event to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can change the data
+ */
+TEST_F(hid_bpf, subprog_raw_event)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_subprog_first_event" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[2], 47);
+
+ /* inject another event */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 1;
+ buf[1] = 47;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[2], 52);
+}
+
+/*
* Ensures that we can attach/detach programs
*/
TEST_F(hid_bpf, test_attach_detach)
@@ -648,13 +695,17 @@ TEST_F(hid_bpf, test_attach_detach)
{ .name = "hid_first_event" },
{ .name = "hid_second_event" },
};
+ struct bpf_link *link;
__u8 buf[10] = {0};
- int err, link;
+ int err, link_fd;
LOAD_PROGRAMS(progs);
link = self->hid_links[0];
- ASSERT_GT(link, 0) TH_LOG("HID-BPF link not created");
+ ASSERT_OK_PTR(link) TH_LOG("HID-BPF link not created");
+
+ link_fd = bpf_link__fd(link);
+ ASSERT_GE(link_fd, 0) TH_LOG("HID-BPF link FD not valid");
/* inject one event */
buf[0] = 1;
@@ -673,7 +724,7 @@ TEST_F(hid_bpf, test_attach_detach)
/* pin the first program and immediately unpin it */
#define PIN_PATH "/sys/fs/bpf/hid_first_event"
- err = bpf_obj_pin(link, PIN_PATH);
+ err = bpf_obj_pin(link_fd, PIN_PATH);
ASSERT_OK(err) TH_LOG("error while calling bpf_obj_pin");
remove(PIN_PATH);
#undef PIN_PATH
@@ -876,6 +927,325 @@ TEST_F(hid_bpf, test_hid_user_raw_request_call)
}
/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is called and prevents the
+ * call to uhid.
+ */
+TEST_F(hid_bpf, test_hid_filter_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_filter_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* first check that we did not attach to device_event */
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 42);
+ ASSERT_EQ(buf[2], 0) TH_LOG("leftovers_from_previous_test");
+
+ /* now check that our program is preventing hid_hw_raw_request() */
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
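+	/* hid_test_filter_raw_request returns -20, so the ioctl is expected to fail with errno 20 */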
+ ASSERT_LT(err, 0) TH_LOG("unexpected success while reading HIDIOCGFEATURE: %d", err);
+ ASSERT_EQ(errno, 20) TH_LOG("unexpected error code while reading HIDIOCGFEATURE: %d",
+ errno);
+
+ /* remove our bpf program and check that we can now emit commands */
+
+ /* detach the program */
+ detach_bpf(self);
+
+ self->hidraw_fd = open_hidraw(self->dev_id);
+ ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
+
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_GE(err, 0) TH_LOG("error while reading HIDIOCGFEATURE: %d", err);
+}
+
+/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is called and can issue the call
+ * to uhid and transform the answer.
+ */
+TEST_F(hid_bpf, test_hid_change_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_hidraw_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_EQ(err, 3) TH_LOG("unexpected returned size while reading HIDIOCGFEATURE: %d", err);
+
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 3);
+ ASSERT_EQ(buf[2], 4);
+}
+
+/*
+ * Call hid_hw_raw_request against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_raw_request_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_raw_request" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_raw_request from hidraw */
+ /* Get Feature */
+ memset(buf, 0, sizeof(buf));
+ buf[0] = 0x1; /* Report Number */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_EQ(err, 3) TH_LOG("unexpected returned size while reading HIDIOCGFEATURE: %d", err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is called and prevents the
+ * call to uhid.
+ */
+TEST_F(hid_bpf, test_hid_filter_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_filter_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* first check that we did not attach to device_event */
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 42);
+ ASSERT_EQ(buf[2], 0) TH_LOG("leftovers_from_previous_test");
+
+ /* now check that our program is preventing hid_hw_output_report() */
+
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 3);
+ ASSERT_LT(err, 0) TH_LOG("unexpected success while sending hid_hw_output_report: %d", err);
+ ASSERT_EQ(errno, 25) TH_LOG("unexpected error code while sending hid_hw_output_report: %d",
+ errno);
+
+ /* remove our bpf program and check that we can now emit commands */
+
+ /* detach the program */
+ detach_bpf(self);
+
+ self->hidraw_fd = open_hidraw(self->dev_id);
+ ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
+
+ err = write(self->hidraw_fd, buf, 3);
+ ASSERT_GE(err, 0) TH_LOG("error while sending hid_hw_output_report: %d", err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is called and can issue the call
+ * to uhid and transform the answer.
+ */
+TEST_F(hid_bpf, test_hid_change_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_hidraw_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_output_report from hidraw */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 10);
+ ASSERT_EQ(err, 2) TH_LOG("unexpected returned size while sending hid_hw_output_report: %d",
+ err);
+}
+
+/*
+ * Call hid_hw_output_report against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_output_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_output_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* emit hid_hw_output_report from hidraw */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ err = write(self->hidraw_fd, buf, 8);
+ ASSERT_EQ(err, 2) TH_LOG("unexpected returned size while sending hid_hw_output_report: %d",
+ err);
+}
+
+/*
+ * Attach hid_multiply_event_wq to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can add extra data
+ */
+TEST_F(hid_bpf, test_multiply_events_wq)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_multiply_events_wq" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 47);
+
+ usleep(100000);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 3);
+}
+
+/*
+ * Attach hid_multiply_event to the given uhid device,
+ * retrieve and open the matching hidraw node,
+ * inject one event in the uhid device,
+ * check that the program sees it and can add extra data
+ */
+TEST_F(hid_bpf, test_multiply_events)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_multiply_events" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+ /* inject one event */
+ buf[0] = 1;
+ buf[1] = 42;
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 47);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 2);
+ ASSERT_EQ(buf[1], 52);
+}
+
+/*
+ * Call hid_bpf_input_report against the given uhid device,
+ * check that the program is not making infinite loops.
+ */
+TEST_F(hid_bpf, test_hid_infinite_loop_input_report_call)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_test_infinite_loop_input_report" },
+ };
+ __u8 buf[10] = {0};
+ int err;
+
+ LOAD_PROGRAMS(progs);
+
+	/* inject one event in the uhid device */
+ buf[0] = 1; /* report ID */
+ buf[1] = 2;
+ buf[2] = 42;
+
+ uhid_send_event(_metadata, self->uhid_fd, buf, 6);
+
+ /* read the data from hidraw */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 3);
+
+ /* read the data from hidraw: hid_bpf_try_input_report should work exactly one time */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
+ ASSERT_EQ(buf[0], 1);
+ ASSERT_EQ(buf[1], 4);
+
+ /* read the data from hidraw: there should be none */
+ memset(buf, 0, sizeof(buf));
+ err = read(self->hidraw_fd, buf, sizeof(buf));
+ ASSERT_EQ(err, -1) TH_LOG("read_hidraw");
+}
+
+/*
* Attach hid_insert{0,1,2} to the given uhid device,
* retrieve and open the matching hidraw node,
* inject one event in the uhid device,
diff --git a/tools/testing/selftests/hid/progs/hid.c b/tools/testing/selftests/hid/progs/hid.c
index f67d35def142..ee9bbbcf751b 100644
--- a/tools/testing/selftests/hid/progs/hid.c
+++ b/tools/testing/selftests/hid/progs/hid.c
@@ -14,8 +14,8 @@ struct attach_prog_args {
__u64 callback_check = 52;
__u64 callback2_check = 52;
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx)
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
@@ -29,8 +29,38 @@ int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx)
return hid_ctx->size;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops first_event = {
+ .hid_device_event = (void *)hid_first_event,
+ .hid_id = 2,
+};
+
+int __hid_subprog_first_event(struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
+
+ if (!rw_data)
+ return 0; /* EPERM check */
+
+ rw_data[2] = rw_data[1] + 5;
+
+ return hid_ctx->size;
+}
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_subprog_first_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ return __hid_subprog_first_event(hid_ctx, type);
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops subprog_first_event = {
+ .hid_device_event = (void *)hid_subprog_first_event,
+ .hid_id = 2,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -42,8 +72,13 @@ int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx)
return hid_ctx->size;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops second_event = {
+ .hid_device_event = (void *)hid_second_event,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
@@ -55,15 +90,10 @@ int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx)
return 9;
}
-SEC("syscall")
-int attach_prog(struct attach_prog_args *ctx)
-{
- ctx->retval = hid_bpf_attach_prog(ctx->hid,
- ctx->prog_fd,
- ctx->insert_head ? HID_BPF_FLAG_INSERT_HEAD :
- HID_BPF_FLAG_NONE);
- return 0;
-}
+SEC(".struct_ops.link")
+struct hid_bpf_ops change_report_id = {
+ .hid_device_event = (void *)hid_change_report_id,
+};
struct hid_hw_request_syscall_args {
/* data needs to come at offset 0 so we can use it in calls */
@@ -181,7 +211,12 @@ static const __u8 rdesc[] = {
0xc0, /* END_COLLECTION */
};
-SEC("?fmod_ret/hid_bpf_rdesc_fixup")
+/*
+ * The following program is marked as sleepable (struct_ops.s).
+ * This is not strictly mandatory, but it is a nice test for
+ * sleepable struct_ops.
+ */
+SEC("?struct_ops.s/hid_rdesc_fixup")
int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4096 /* size */);
@@ -200,8 +235,13 @@ int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
return sizeof(rdesc) + 73;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops rdesc_fixup = {
+ .hid_rdesc_fixup = (void *)hid_rdesc_fixup,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -217,8 +257,14 @@ int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx)
return 0;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert1 = {
+ .hid_device_event = (void *)hid_test_insert1,
+ .flags = BPF_F_BEFORE,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -234,8 +280,13 @@ int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx)
return 0;
}
-SEC("?fmod_ret/hid_bpf_device_event")
-int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx)
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert2 = {
+ .hid_device_event = (void *)hid_test_insert2,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
@@ -250,3 +301,300 @@ int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx)
return 0;
}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_insert3 = {
+ .hid_device_event = (void *)hid_test_insert3,
+};
+
+SEC("?struct_ops/hid_hw_request")
+int BPF_PROG(hid_test_filter_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
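+ /* a negative return prevents the request from reaching the device;
+ * the error is propagated back to the caller
+ */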
+ return -20;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_filter_raw_request = {
+ .hid_hw_request = (void *)hid_test_filter_raw_request,
+};
+
+static struct file *current_file;
+
+SEC("fentry/hidraw_open")
+int BPF_PROG(hidraw_open, struct inode *inode, struct file *file)
+{
+ current_file = file;
+ return 0;
+}
+
+SEC("?struct_ops.s/hid_hw_request")
+int BPF_PROG(hid_test_hidraw_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* check if the incoming request comes from our hidraw operation */
+ if (source == (__u64)current_file) {
+ data[0] = reportnum;
+
+ ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
+ if (ret != 2)
+ return -1;
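+ /* overwrite the device answer with a synthetic 3-byte reply */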
+ data[0] = reportnum + 1;
+ data[1] = reportnum + 2;
+ data[2] = reportnum + 3;
+ return 3;
+ }
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_hidraw_raw_request = {
+ .hid_hw_request = (void *)hid_test_hidraw_raw_request,
+};
+
+SEC("?struct_ops.s/hid_hw_request")
+int BPF_PROG(hid_test_infinite_loop_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* always forward the request as-is to the device; hid-bpf should prevent
+ * infinite loops.
+ */
+ data[0] = reportnum;
+
+ ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
+ if (ret == 2)
+ return 3;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_raw_request = {
+ .hid_hw_request = (void *)hid_test_infinite_loop_raw_request,
+};
+
+SEC("?struct_ops/hid_hw_output_report")
+int BPF_PROG(hid_test_filter_output_report, struct hid_bpf_ctx *hctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
+{
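+ /* a negative return filters out the output report;
+ * the error is propagated back to the caller
+ */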
+ return -25;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_filter_output_report = {
+ .hid_hw_output_report = (void *)hid_test_filter_output_report,
+};
+
+SEC("?struct_ops.s/hid_hw_output_report")
+int BPF_PROG(hid_test_hidraw_output_report, struct hid_bpf_ctx *hctx, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* check if the incoming request comes from our hidraw operation */
+ if (source == (__u64)current_file)
+ return hid_bpf_hw_output_report(hctx, data, 2);
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_hidraw_output_report = {
+ .hid_hw_output_report = (void *)hid_test_hidraw_output_report,
+};
+
+SEC("?struct_ops.s/hid_hw_output_report")
+int BPF_PROG(hid_test_infinite_loop_output_report, struct hid_bpf_ctx *hctx, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* always forward the request as-is to the device; hid-bpf should prevent
+ * infinite loops.
+ */
+
+ ret = hid_bpf_hw_output_report(hctx, data, 2);
+ if (ret == 2)
+ return 2;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_output_report = {
+ .hid_hw_output_report = (void *)hid_test_infinite_loop_output_report,
+};
+
+struct elem {
+ struct bpf_wq work;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
+{
+ __u8 buf[9] = {2, 3, 4, 5, 6, 7, 8, 9, 10};
+ struct hid_bpf_ctx *hid_ctx;
+
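+ /* the map key holds the HID device id; allocate a context so the
+ * workqueue callback can inject a new input report
+ */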
+ hid_ctx = hid_bpf_allocate_context(*key);
+ if (!hid_ctx)
+ return 0; /* EPERM check */
+
+ hid_bpf_input_report(hid_ctx, HID_INPUT_REPORT, buf, sizeof(buf));
+
+ hid_bpf_release_context(hid_ctx);
+
+ return 0;
+}
+
+static int test_inject_input_report_callback(int *key)
+{
+ struct elem init = {}, *val;
+ struct bpf_wq *wq;
+
+ if (bpf_map_update_elem(&hmap, key, &init, 0))
+ return -1;
+
+ val = bpf_map_lookup_elem(&hmap, key);
+ if (!val)
+ return -2;
+
+ wq = &val->work;
+ if (bpf_wq_init(wq, &hmap, 0) != 0)
+ return -3;
+
+ if (bpf_wq_set_callback(wq, wq_cb_sleepable, 0))
+ return -4;
+
+ if (bpf_wq_start(wq, 0))
+ return -5;
+
+ return 0;
+}
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_multiply_events_wq, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 9 /* size */);
+ int hid = hid_ctx->hid->id;
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != 1)
+ return 0;
+
+ ret = test_inject_input_report_callback(&hid);
+ if (ret)
+ return ret;
+
+ data[1] += 5;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_multiply_events_wq = {
+ .hid_device_event = (void *)hid_test_multiply_events_wq,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_multiply_events, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 9 /* size */);
+ __u8 buf[9];
+ int ret;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != 1)
+ return 0;
+
+ /*
+ * we have to use an intermediate buffer because hid_bpf_try_input_report
+ * will memset data to \0
+ */
+ __builtin_memcpy(buf, data, sizeof(buf));
+
+ buf[0] = 2;
+ buf[1] += 5;
+ ret = hid_bpf_try_input_report(hid_ctx, HID_INPUT_REPORT, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * In the real world we should reset the original buffer, as data might be garbage now;
+ * in practice it now holds the content of 'buf'
+ */
+ data[1] += 5;
+
+ return 9;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_multiply_events = {
+ .hid_device_event = (void *)hid_test_multiply_events,
+};
+
+SEC("?struct_ops/hid_device_event")
+int BPF_PROG(hid_test_infinite_loop_input_report, struct hid_bpf_ctx *hctx,
+ enum hid_report_type report_type, __u64 source)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 6 /* size */);
+ __u8 buf[6];
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /*
+ * we have to use an intermediate buffer because hid_bpf_try_input_report
+ * will memset data to \0
+ */
+ __builtin_memcpy(buf, data, sizeof(buf));
+
+ /* always forward the request as-is to the device; hid-bpf should prevent
+ * infinite loops.
+ * The return value is ignored, so the event still passes to userspace.
+ */
+
+ hid_bpf_try_input_report(hctx, report_type, buf, sizeof(buf));
+
+ /* each time we process the event, we increment data[1] by one:
+ * after each successful call to hid_bpf_try_input_report, buf
+ * has been memcopied into data by the kernel.
+ */
+ data[1] += 1;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct hid_bpf_ops test_infinite_loop_input_report = {
+ .hid_device_event = (void *)hid_test_infinite_loop_input_report,
+};
diff --git a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
index 9cd56821d0f1..cfe37f491906 100644
--- a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
+++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
@@ -7,6 +7,7 @@
/* "undefine" structs and enums in vmlinux.h, because we "override" them below */
#define hid_bpf_ctx hid_bpf_ctx___not_used
+#define hid_bpf_ops hid_bpf_ops___not_used
#define hid_report_type hid_report_type___not_used
#define hid_class_request hid_class_request___not_used
#define hid_bpf_attach_flags hid_bpf_attach_flags___not_used
@@ -20,13 +21,11 @@
#define HID_REQ_SET_REPORT HID_REQ_SET_REPORT___not_used
#define HID_REQ_SET_IDLE HID_REQ_SET_IDLE___not_used
#define HID_REQ_SET_PROTOCOL HID_REQ_SET_PROTOCOL___not_used
-#define HID_BPF_FLAG_NONE HID_BPF_FLAG_NONE___not_used
-#define HID_BPF_FLAG_INSERT_HEAD HID_BPF_FLAG_INSERT_HEAD___not_used
-#define HID_BPF_FLAG_MAX HID_BPF_FLAG_MAX___not_used
#include "vmlinux.h"
#undef hid_bpf_ctx
+#undef hid_bpf_ops
#undef hid_report_type
#undef hid_class_request
#undef hid_bpf_attach_flags
@@ -40,9 +39,6 @@
#undef HID_REQ_SET_REPORT
#undef HID_REQ_SET_IDLE
#undef HID_REQ_SET_PROTOCOL
-#undef HID_BPF_FLAG_NONE
-#undef HID_BPF_FLAG_INSERT_HEAD
-#undef HID_BPF_FLAG_MAX
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -57,10 +53,8 @@ enum hid_report_type {
};
struct hid_bpf_ctx {
- __u32 index;
- const struct hid_device *hid;
+ struct hid_device *hid;
__u32 allocated_size;
- enum hid_report_type report_type;
union {
__s32 retval;
__s32 size;
@@ -76,17 +70,28 @@ enum hid_class_request {
HID_REQ_SET_PROTOCOL = 0x0B,
};
-enum hid_bpf_attach_flags {
- HID_BPF_FLAG_NONE = 0,
- HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
- HID_BPF_FLAG_MAX,
+struct hid_bpf_ops {
+ int hid_id;
+ u32 flags;
+ struct list_head list;
+ int (*hid_device_event)(struct hid_bpf_ctx *ctx, enum hid_report_type report_type,
+ u64 source);
+ int (*hid_rdesc_fixup)(struct hid_bpf_ctx *ctx);
+ int (*hid_hw_request)(struct hid_bpf_ctx *ctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype,
+ u64 source);
+ int (*hid_hw_output_report)(struct hid_bpf_ctx *ctx, u64 source);
+ struct hid_device *hdev;
};
+#ifndef BPF_F_BEFORE
+#define BPF_F_BEFORE (1U << 3)
+#endif
+
/* following are kfuncs exported by HID for HID-BPF */
extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
unsigned int offset,
const size_t __sz) __ksym;
-extern int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, u32 flags) __ksym;
extern struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id) __ksym;
extern void hid_bpf_release_context(struct hid_bpf_ctx *ctx) __ksym;
extern int hid_bpf_hw_request(struct hid_bpf_ctx *ctx,
@@ -100,5 +105,18 @@ extern int hid_bpf_input_report(struct hid_bpf_ctx *ctx,
enum hid_report_type type,
__u8 *data,
size_t buf__sz) __ksym;
+extern int hid_bpf_try_input_report(struct hid_bpf_ctx *ctx,
+ enum hid_report_type type,
+ __u8 *data,
+ size_t buf__sz) __ksym;
+
+/* bpf_wq implementation */
+extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
+extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
+extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+ int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
+ unsigned int flags__k, void *aux__ign) __ksym;
+#define bpf_wq_set_callback(timer, cb, flags) \
+ bpf_wq_set_callback_impl(timer, cb, flags, NULL)
#endif /* __HID_BPF_HELPERS_H */
diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile
index 05d66ef50c97..f45372cb00fe 100644
--- a/tools/testing/selftests/intel_pstate/Makefile
+++ b/tools/testing/selftests/intel_pstate/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
+CFLAGS := $(CFLAGS) -Wall
LDLIBS += -lm
ARCH ?= $(shell uname -m 2>/dev/null || echo not)
diff --git a/tools/testing/selftests/iommu/Makefile b/tools/testing/selftests/iommu/Makefile
index 32c5fdfd0eef..fd6477911f24 100644
--- a/tools/testing/selftests/iommu/Makefile
+++ b/tools/testing/selftests/iommu/Makefile
@@ -2,8 +2,6 @@
CFLAGS += -Wall -O2 -Wno-unused-function
CFLAGS += $(KHDR_INCLUDES)
-CFLAGS += -D_GNU_SOURCE
-
TEST_GEN_PROGS :=
TEST_GEN_PROGS += iommufd
TEST_GEN_PROGS += iommufd_fail_nth
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index edf1c99c9936..6343f4053bd4 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -279,6 +279,9 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
uint32_t parent_hwpt_id = 0;
uint32_t parent_hwpt_id_not_work = 0;
uint32_t test_hwpt_id = 0;
+ uint32_t iopf_hwpt_id;
+ uint32_t fault_id;
+ uint32_t fault_fd;
if (self->device_id) {
/* Negative tests */
@@ -326,6 +329,7 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
sizeof(data));
/* Allocate two nested hwpts sharing one common parent hwpt */
+ test_ioctl_fault_alloc(&fault_id, &fault_fd);
test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
&nested_hwpt_id[0],
IOMMU_HWPT_DATA_SELFTEST, &data,
@@ -334,6 +338,14 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
&nested_hwpt_id[1],
IOMMU_HWPT_DATA_SELFTEST, &data,
sizeof(data));
+ test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
+ UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
+ &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
+ &data, sizeof(data));
+ test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
+ IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
+ IOMMU_HWPT_DATA_SELFTEST, &data,
+ sizeof(data));
test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
IOMMU_TEST_IOTLB_DEFAULT);
test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
@@ -504,14 +516,24 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
_test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
test_ioctl_destroy(nested_hwpt_id[0]);
+ /* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
+ test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
+ EXPECT_ERRNO(EBUSY,
+ _test_ioctl_destroy(self->fd, iopf_hwpt_id));
+ /* Trigger an IOPF on the device */
+ test_cmd_trigger_iopf(self->device_id, fault_fd);
+
/* Detach from nested_hwpt_id[1] and destroy it */
test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
test_ioctl_destroy(nested_hwpt_id[1]);
+ test_ioctl_destroy(iopf_hwpt_id);
/* Detach from the parent hw_pagetable and destroy it */
test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
test_ioctl_destroy(parent_hwpt_id);
test_ioctl_destroy(parent_hwpt_id_not_work);
+ close(fault_fd);
+ test_ioctl_destroy(fault_id);
} else {
test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
&parent_hwpt_id);
@@ -1722,10 +1744,17 @@ FIXTURE_VARIANT(iommufd_dirty_tracking)
FIXTURE_SETUP(iommufd_dirty_tracking)
{
+ unsigned long size;
int mmap_flags;
void *vrc;
int rc;
+ if (variant->buffer_size < MOCK_PAGE_SIZE) {
+ SKIP(return,
+ "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
+ variant->buffer_size, MOCK_PAGE_SIZE);
+ }
+
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
@@ -1749,12 +1778,11 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
assert(vrc == self->buffer);
self->page_size = MOCK_PAGE_SIZE;
- self->bitmap_size =
- variant->buffer_size / self->page_size / BITS_PER_BYTE;
+ self->bitmap_size = variant->buffer_size / self->page_size;
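+ /* bitmap_size is now a number of bits, one per mock page */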
/* Provision with an extra (PAGE_SIZE) for the unaligned case */
- rc = posix_memalign(&self->bitmap, PAGE_SIZE,
- self->bitmap_size + PAGE_SIZE);
+ size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
+ rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
assert(!rc);
assert(self->bitmap);
assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
@@ -1775,51 +1803,63 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
munmap(self->buffer, variant->buffer_size);
- munmap(self->bitmap, self->bitmap_size);
+ munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
teardown_iommufd(self->fd, _metadata);
}
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
+{
+ /* half of an u8 index bitmap */
+ .buffer_size = 8UL * 1024UL,
+};
+
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
+{
+ /* one u8 index bitmap */
+ .buffer_size = 16UL * 1024UL,
+};
+
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
{
/* one u32 index bitmap */
- .buffer_size = 128UL * 1024UL,
+ .buffer_size = 64UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
{
/* one u64 index bitmap */
- .buffer_size = 256UL * 1024UL,
+ .buffer_size = 128UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
{
/* two u64 index and trailing end bitmap */
- .buffer_size = 640UL * 1024UL,
+ .buffer_size = 320UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
{
- /* 4K bitmap (128M IOVA range) */
- .buffer_size = 128UL * 1024UL * 1024UL,
+ /* 4K bitmap (64M IOVA range) */
+ .buffer_size = 64UL * 1024UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
{
- /* 4K bitmap (128M IOVA range) */
- .buffer_size = 128UL * 1024UL * 1024UL,
+ /* 4K bitmap (64M IOVA range) */
+ .buffer_size = 64UL * 1024UL * 1024UL,
.hugepages = true,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
{
- /* 8K bitmap (256M IOVA range) */
- .buffer_size = 256UL * 1024UL * 1024UL,
+ /* 8K bitmap (128M IOVA range) */
+ .buffer_size = 128UL * 1024UL * 1024UL,
};
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge)
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
{
- /* 8K bitmap (256M IOVA range) */
- .buffer_size = 256UL * 1024UL * 1024UL,
+ /* 8K bitmap (128M IOVA range) */
+ .buffer_size = 128UL * 1024UL * 1024UL,
.hugepages = true,
};
diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c
index f590417cd67a..c5d5e69452b0 100644
--- a/tools/testing/selftests/iommu/iommufd_fail_nth.c
+++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c
@@ -615,7 +615,7 @@ TEST_FAIL_NTH(basic_fail_nth, device)
if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL))
return -1;
- if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id,
+ if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, 0, &hwpt_id,
IOMMU_HWPT_DATA_NONE, 0, 0))
return -1;
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index 8d2b46b2114d..40f6f14ce136 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -22,6 +22,8 @@
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
@@ -153,7 +155,7 @@ static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
pt_id, NULL))
-static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
+static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
__u32 flags, __u32 *hwpt_id, __u32 data_type,
void *data, size_t data_len)
{
@@ -165,6 +167,7 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
.data_type = data_type,
.data_len = data_len,
.data_uptr = (uint64_t)data,
+ .fault_id = ft_id,
};
int ret;
@@ -177,24 +180,36 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
}
#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \
- ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
+ ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \
EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc( \
- self->fd, device_id, pt_id, flags, \
+ self->fd, device_id, pt_id, 0, flags, \
hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))
#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id, \
data_type, data, data_len) \
- ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
+ ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
data_type, data, data_len) \
EXPECT_ERRNO(_errno, \
- _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
+ _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
hwpt_id, data_type, data, data_len))
+#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id, \
+ data_type, data, data_len) \
+ ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
+ flags, hwpt_id, data_type, data, \
+ data_len))
+#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags, \
+ hwpt_id, data_type, data, data_len) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
+ flags, hwpt_id, data_type, data, \
+ data_len))
+
#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected) \
({ \
struct iommu_test_cmd test_cmd = { \
@@ -346,12 +361,12 @@ static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
__u64 iova, size_t page_size,
size_t pte_page_size, __u64 *bitmap,
- __u64 bitmap_size, __u32 flags,
+ __u64 nbits, __u32 flags,
struct __test_metadata *_metadata)
{
unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
- unsigned long nbits = bitmap_size * BITS_PER_BYTE;
unsigned long j, i, nr = nbits / pteset ?: 1;
+ unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
__u64 out_dirty = 0;
/* Mark all even bits as dirty in the mock domain */
@@ -684,3 +699,66 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))
+
+static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
+{
+ struct iommu_fault_alloc cmd = {
+ .size = sizeof(cmd),
+ };
+ int ret;
+
+ ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
+ if (ret)
+ return ret;
+ *fault_id = cmd.out_fault_id;
+ *fault_fd = cmd.out_fault_fd;
+ return 0;
+}
+
+#define test_ioctl_fault_alloc(fault_id, fault_fd) \
+ ({ \
+ ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
+ fault_fd)); \
+ ASSERT_NE(0, *(fault_id)); \
+ ASSERT_NE(0, *(fault_fd)); \
+ })
+
+static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd)
+{
+ struct iommu_test_cmd trigger_iopf_cmd = {
+ .size = sizeof(trigger_iopf_cmd),
+ .op = IOMMU_TEST_OP_TRIGGER_IOPF,
+ .trigger_iopf = {
+ .dev_id = device_id,
+ .pasid = 0x1,
+ .grpid = 0x2,
+ .perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
+ .addr = 0xdeadbeaf,
+ },
+ };
+ struct iommu_hwpt_page_response response = {
+ .code = IOMMUFD_PAGE_RESP_SUCCESS,
+ };
+ struct iommu_hwpt_pgfault fault = {};
+ ssize_t bytes;
+ int ret;
+
+ ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
+ if (ret)
+ return ret;
+
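+ /* read the injected fault back from the fault fd and acknowledge it
+ * with a success response
+ */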
+ bytes = read(fault_fd, &fault, sizeof(fault));
+ if (bytes <= 0)
+ return -EIO;
+
+ response.cookie = fault.cookie;
+
+ bytes = write(fault_fd, &response, sizeof(response));
+ if (bytes <= 0)
+ return -EIO;
+
+ return 0;
+}
+
+#define test_cmd_trigger_iopf(device_id, fault_fd) \
+ ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 76c2a6945d3e..b8967b6e29d5 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -168,15 +168,7 @@ static inline __printf(1, 2) void ksft_print_msg(const char *msg, ...)
static inline void ksft_perror(const char *msg)
{
-#ifndef NOLIBC
ksft_print_msg("%s: %s (%d)\n", msg, strerror(errno), errno);
-#else
- /*
- * nolibc doesn't provide strerror() and it seems
- * inappropriate to add one, just print the errno.
- */
- ksft_print_msg("%s: %d)\n", msg, errno);
-#endif
}
static inline __printf(1, 2) void ksft_test_result_pass(const char *msg, ...)
diff --git a/tools/testing/selftests/devices/ksft.py b/tools/testing/selftests/kselftest/ksft.py
index cd89fb2bc10e..cd89fb2bc10e 100644
--- a/tools/testing/selftests/devices/ksft.py
+++ b/tools/testing/selftests/kselftest/ksft.py
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index ac280dcba996..48d32c5aa3eb 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -112,6 +112,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
+TEST_GEN_PROGS_x86_64 += x86_64/apic_bus_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/xapic_ipi_test
TEST_GEN_PROGS_x86_64 += x86_64/xapic_state_test
TEST_GEN_PROGS_x86_64 += x86_64/xcr0_cpuid_test
@@ -145,6 +146,7 @@ TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
TEST_GEN_PROGS_x86_64 += system_counter_offset_test
+TEST_GEN_PROGS_x86_64 += pre_fault_memory_test
# Compiled outputs used by test targets
TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test
@@ -231,7 +233,7 @@ LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
- -D_GNU_SOURCE -fno-builtin-memcmp -fno-builtin-memcpy \
+ -fno-builtin-memcmp -fno-builtin-memcpy \
-fno-builtin-memset -fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
index a7de39fa2a0a..d20981663831 100644
--- a/tools/testing/selftests/kvm/aarch64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
@@ -219,6 +219,7 @@ static void guest_code(void)
GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
+ GUEST_REG_SYNC(SYS_CTR_EL0);
GUEST_DONE();
}
@@ -490,11 +491,25 @@ static void test_clidr(struct kvm_vcpu *vcpu)
test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
}
+static void test_ctr(struct kvm_vcpu *vcpu)
+{
+ u64 ctr;
+
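+ /*
+ * Clear DIC and, when possible, shrink IminLine so the value written
+ * back differs from the reset value of CTR_EL0.
+ */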
+ vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), &ctr);
+ ctr &= ~CTR_EL0_DIC_MASK;
+ if (ctr & CTR_EL0_IminLine_MASK)
+ ctr--;
+
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), ctr);
+ test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
+}
+
static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
{
u64 val;
test_clidr(vcpu);
+ test_ctr(vcpu);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &val);
val++;
@@ -524,7 +539,9 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);
+ test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
+ test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);
ksft_test_result_pass("%s\n", __func__);
}
diff --git a/tools/testing/selftests/kvm/include/x86_64/apic.h b/tools/testing/selftests/kvm/include/x86_64/apic.h
index bed316fdecd5..0f268b55fa06 100644
--- a/tools/testing/selftests/kvm/include/x86_64/apic.h
+++ b/tools/testing/selftests/kvm/include/x86_64/apic.h
@@ -60,6 +60,14 @@
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
#define SET_APIC_DEST_FIELD(x) ((x) << 24)
+#define APIC_LVTT 0x320
+#define APIC_LVT_TIMER_ONESHOT (0 << 17)
+#define APIC_LVT_TIMER_PERIODIC (1 << 17)
+#define APIC_LVT_TIMER_TSCDEADLINE (2 << 17)
+#define APIC_LVT_MASKED (1 << 16)
+#define APIC_TMICT 0x380
+#define APIC_TMCCT 0x390
+#define APIC_TDCR 0x3E0
void apic_disable(void);
void xapic_enable(void);
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index c0c7c1fe93f9..a0c1440017bb 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -23,6 +23,7 @@
extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
+extern uint64_t guest_tsc_khz;
/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
@@ -816,6 +817,23 @@ static inline void cpu_relax(void)
asm volatile("rep; nop" ::: "memory");
}
+static inline void udelay(unsigned long usec)
+{
+ uint64_t start, now, cycles;
+
+ GUEST_ASSERT(guest_tsc_khz);
+ cycles = guest_tsc_khz / 1000 * usec;
+
+ /*
+ * Deliberately don't PAUSE, a.k.a. cpu_relax(), so that the delay is
+ * as accurate as possible, e.g. doesn't trigger PAUSE-Loop VM-Exits.
+ */
+ start = rdtsc();
+ do {
+ now = rdtsc();
+ } while (now - start < cycles);
+}
+
#define ud2() \
__asm__ __volatile__( \
"ud2\n" \
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index ad00e4761886..56b170b725b3 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -21,6 +21,7 @@
uint32_t guest_random_seed;
struct guest_random_state guest_rng;
+static uint32_t last_guest_seed;
static int vcpu_mmap_sz(void);
@@ -434,7 +435,10 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
slot0 = memslot2region(vm, 0);
ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
- pr_info("Random seed: 0x%x\n", guest_random_seed);
+ if (guest_random_seed != last_guest_seed) {
+ pr_info("Random seed: 0x%x\n", guest_random_seed);
+ last_guest_seed = guest_random_seed;
+ }
guest_rng = new_guest_random_state(guest_random_seed);
sync_global_to_guest(vm, guest_rng);
@@ -2319,7 +2323,8 @@ void __attribute((constructor)) kvm_selftest_init(void)
/* Tell stdout not to buffer its content. */
setbuf(stdout, NULL);
- guest_random_seed = random();
+ guest_random_seed = last_guest_seed = random();
+ pr_info("Random seed: 0x%x\n", guest_random_seed);
kvm_selftest_arch_init();
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 594b061aef52..153739f2e201 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -25,6 +25,7 @@ vm_vaddr_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
bool is_forced_emulation_enabled;
+uint64_t guest_tsc_khz;
static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
@@ -616,6 +617,11 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
void kvm_arch_vm_post_create(struct kvm_vm *vm)
{
+ int r;
+
+ TEST_ASSERT(kvm_has_cap(KVM_CAP_GET_TSC_KHZ),
+ "Require KVM_GET_TSC_KHZ to provide udelay() to guest.");
+
vm_create_irqchip(vm);
vm_init_descriptor_tables(vm);
@@ -628,6 +634,11 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
}
+
+ r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
+ TEST_ASSERT(r > 0, "KVM_GET_TSC_KHZ did not provide a valid TSC frequency.");
+ guest_tsc_khz = r;
+ sync_global_to_guest(vm, guest_tsc_khz);
}
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 05fcf902e067..49f162573126 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -53,12 +53,6 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
}
}
-struct memslot_antagonist_args {
- struct kvm_vm *vm;
- useconds_t delay;
- uint64_t nr_modifications;
-};
-
static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
uint64_t nr_modifications)
{
diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c
new file mode 100644
index 000000000000..0350a8896a2f
--- /dev/null
+++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Intel, Inc
+ *
+ * Author:
+ * Isaku Yamahata <isaku.yamahata at gmail.com>
+ */
+#include <linux/sizes.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+/* Arbitrarily chosen values */
+#define TEST_SIZE (SZ_2M + PAGE_SIZE)
+#define TEST_NPAGES (TEST_SIZE / PAGE_SIZE)
+#define TEST_SLOT 10
+
+static void guest_code(uint64_t base_gpa)
+{
+ volatile uint64_t val __used;
+ int i;
+
+ for (i = 0; i < TEST_NPAGES; i++) {
+ uint64_t *src = (uint64_t *)(base_gpa + i * PAGE_SIZE);
+
+ val = *src;
+ }
+
+ GUEST_DONE();
+}
+
+static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
+ u64 left)
+{
+ struct kvm_pre_fault_memory range = {
+ .gpa = gpa,
+ .size = size,
+ .flags = 0,
+ };
+ u64 prev;
+ int ret, save_errno;
+
+ do {
+ prev = range.size;
+ ret = __vcpu_ioctl(vcpu, KVM_PRE_FAULT_MEMORY, &range);
+ save_errno = errno;
+ TEST_ASSERT((range.size < prev) ^ (ret < 0),
+ "%sexpecting range.size to change on %s",
+ ret < 0 ? "not " : "",
+ ret < 0 ? "failure" : "success");
+ } while (ret >= 0 ? range.size : save_errno == EINTR);
+
+ TEST_ASSERT(range.size == left,
+ "Completed with %lld bytes left, expected %" PRId64,
+ range.size, left);
+
+ if (left == 0)
+ __TEST_ASSERT_VM_VCPU_IOCTL(!ret, "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
+ else
+ /* No memory slot causes RET_PF_EMULATE, which results in -ENOENT. */
+ __TEST_ASSERT_VM_VCPU_IOCTL(ret && save_errno == ENOENT,
+ "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
+}
+
+static void __test_pre_fault_memory(unsigned long vm_type, bool private)
+{
+ const struct vm_shape shape = {
+ .mode = VM_MODE_DEFAULT,
+ .type = vm_type,
+ };
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ struct ucall uc;
+
+ uint64_t guest_test_phys_mem;
+ uint64_t guest_test_virt_mem;
+ uint64_t alignment, guest_page_size;
+
+ vm = vm_create_shape_with_one_vcpu(shape, &vcpu, guest_code);
+
+ alignment = guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
+ guest_test_phys_mem = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
+#ifdef __s390x__
+ alignment = max(0x100000UL, guest_page_size);
+#else
+ alignment = SZ_2M;
+#endif
+ guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
+ guest_test_virt_mem = guest_test_phys_mem & ((1ULL << (vm->va_bits - 1)) - 1);
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ guest_test_phys_mem, TEST_SLOT, TEST_NPAGES,
+ private ? KVM_MEM_GUEST_MEMFD : 0);
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, TEST_NPAGES);
+
+ if (private)
+ vm_mem_set_private(vm, guest_test_phys_mem, TEST_SIZE);
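+
+ /*
+ * Pre-fault in three chunks: the first 2M lies entirely inside the
+ * memslot, the next two pages straddle the end of the slot (one page
+ * is left unfaulted), and the last page is fully outside the slot
+ * (nothing can be faulted in).
+ */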
+ pre_fault_memory(vcpu, guest_test_phys_mem, SZ_2M, 0);
+ pre_fault_memory(vcpu, guest_test_phys_mem + SZ_2M, PAGE_SIZE * 2, PAGE_SIZE);
+ pre_fault_memory(vcpu, guest_test_phys_mem + TEST_SIZE, PAGE_SIZE, PAGE_SIZE);
+
+ vcpu_args_set(vcpu, 1, guest_test_virt_mem);
+ vcpu_run(vcpu);
+
+ run = vcpu->run;
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Wanted KVM_EXIT_IO, got exit reason: %u (%s)",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ case UCALL_DONE:
+ break;
+ default:
+ TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+ break;
+ }
+
+ kvm_vm_free(vm);
+}
+
+static void test_pre_fault_memory(unsigned long vm_type, bool private)
+{
+ if (vm_type && !(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type))) {
+ pr_info("Skipping tests for vm_type 0x%lx\n", vm_type);
+ return;
+ }
+
+ __test_pre_fault_memory(vm_type, private);
+}
+
+int main(int argc, char *argv[])
+{
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_PRE_FAULT_MEMORY));
+
+ test_pre_fault_memory(0, false);
+#ifdef __x86_64__
+ test_pre_fault_memory(KVM_X86_SW_PROTECTED_VM, false);
+ test_pre_fault_memory(KVM_X86_SW_PROTECTED_VM, true);
+#endif
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index 222198dd6d04..f92c2fb23fcd 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -49,6 +49,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZACAS:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZAWRS:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC:
@@ -56,6 +57,11 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKC:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKX:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBS:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCA:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCB:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCD:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCF:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCMOP:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFA:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFH:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFHMIN:
@@ -68,6 +74,7 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTNTL:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHPM:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIMOP:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKND:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNE:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNH:
@@ -415,6 +422,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZACAS),
+ KVM_ISA_EXT_ARR(ZAWRS),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZBC),
@@ -422,6 +430,11 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZBKC),
KVM_ISA_EXT_ARR(ZBKX),
KVM_ISA_EXT_ARR(ZBS),
+ KVM_ISA_EXT_ARR(ZCA),
+ KVM_ISA_EXT_ARR(ZCB),
+ KVM_ISA_EXT_ARR(ZCD),
+ KVM_ISA_EXT_ARR(ZCF),
+ KVM_ISA_EXT_ARR(ZCMOP),
KVM_ISA_EXT_ARR(ZFA),
KVM_ISA_EXT_ARR(ZFH),
KVM_ISA_EXT_ARR(ZFHMIN),
@@ -434,6 +447,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZIHINTNTL),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZIHPM),
+ KVM_ISA_EXT_ARR(ZIMOP),
KVM_ISA_EXT_ARR(ZKND),
KVM_ISA_EXT_ARR(ZKNE),
KVM_ISA_EXT_ARR(ZKNH),
@@ -939,6 +953,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL);
KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
KVM_ISA_EXT_SIMPLE_CONFIG(zacas, ZACAS);
+KVM_ISA_EXT_SIMPLE_CONFIG(zawrs, ZAWRS);
KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC);
@@ -946,6 +961,11 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zbkb, ZBKB);
KVM_ISA_EXT_SIMPLE_CONFIG(zbkc, ZBKC);
KVM_ISA_EXT_SIMPLE_CONFIG(zbkx, ZBKX);
KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS);
+KVM_ISA_EXT_SIMPLE_CONFIG(zca, ZCA);
+KVM_ISA_EXT_SIMPLE_CONFIG(zcb, ZCB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD);
+KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF);
+KVM_ISA_EXT_SIMPLE_CONFIG(zcmop, ZCMOP);
KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA);
KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH);
KVM_ISA_EXT_SIMPLE_CONFIG(zfhmin, ZFHMIN);
@@ -958,6 +978,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zifencei, ZIFENCEI);
KVM_ISA_EXT_SIMPLE_CONFIG(zihintntl, ZIHINTNTL);
KVM_ISA_EXT_SIMPLE_CONFIG(zihintpause, ZIHINTPAUSE);
KVM_ISA_EXT_SIMPLE_CONFIG(zihpm, ZIHPM);
+KVM_ISA_EXT_SIMPLE_CONFIG(zimop, ZIMOP);
KVM_ISA_EXT_SIMPLE_CONFIG(zknd, ZKND);
KVM_ISA_EXT_SIMPLE_CONFIG(zkne, ZKNE);
KVM_ISA_EXT_SIMPLE_CONFIG(zknh, ZKNH);
@@ -995,6 +1016,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_svnapot,
&config_svpbmt,
&config_zacas,
+ &config_zawrs,
&config_zba,
&config_zbb,
&config_zbc,
@@ -1002,6 +1024,11 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_zbkc,
&config_zbkx,
&config_zbs,
+ &config_zca,
+ &config_zcb,
+ &config_zcd,
+ &config_zcf,
+ &config_zcmop,
&config_zfa,
&config_zfh,
&config_zfhmin,
@@ -1014,6 +1041,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_zihintntl,
&config_zihintpause,
&config_zihpm,
+ &config_zimop,
&config_zknd,
&config_zkne,
&config_zknh,
diff --git a/tools/testing/selftests/kvm/x86_64/apic_bus_clock_test.c b/tools/testing/selftests/kvm/x86_64/apic_bus_clock_test.c
new file mode 100644
index 000000000000..f8916bb34405
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/apic_bus_clock_test.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Intel Corporation
+ *
+ * Verify KVM correctly emulates the APIC bus frequency when the VMM configures
+ * the frequency via KVM_CAP_X86_APIC_BUS_CYCLES_NS. Start the APIC timer by
+ * programming TMICT (timer initial count) to the largest value possible (so
+ * that the timer will not expire during the test). Then, after an arbitrary
+ * amount of time has elapsed, verify TMCCT (timer current count) is within 1%
+ * of the expected value based on the time elapsed, the APIC bus frequency, and
+ * the programmed TDCR (timer divide configuration register).
+ */
+
+#include "apic.h"
+#include "test_util.h"
+
+/*
+ * Possible TDCR values with matching divide count. Used to modify APIC
+ * timer frequency.
+ */
+static const struct {
+ const uint32_t tdcr;
+ const uint32_t divide_count;
+} tdcrs[] = {
+ {0x0, 2},
+ {0x1, 4},
+ {0x2, 8},
+ {0x3, 16},
+ {0x8, 32},
+ {0x9, 64},
+ {0xa, 128},
+ {0xb, 1},
+};
+
+static bool is_x2apic;
+
+static void apic_enable(void)
+{
+ if (is_x2apic)
+ x2apic_enable();
+ else
+ xapic_enable();
+}
+
+static uint32_t apic_read_reg(unsigned int reg)
+{
+ return is_x2apic ? x2apic_read_reg(reg) : xapic_read_reg(reg);
+}
+
+static void apic_write_reg(unsigned int reg, uint32_t val)
+{
+ if (is_x2apic)
+ x2apic_write_reg(reg, val);
+ else
+ xapic_write_reg(reg, val);
+}
+
+static void apic_guest_code(uint64_t apic_hz, uint64_t delay_ms)
+{
+ uint64_t tsc_hz = guest_tsc_khz * 1000;
+ const uint32_t tmict = ~0u;
+ uint64_t tsc0, tsc1, freq;
+ uint32_t tmcct;
+ int i;
+
+ apic_enable();
+
+ /*
+ * Setup one-shot timer. The vector does not matter because the
+ * interrupt should not fire.
+ */
+ apic_write_reg(APIC_LVTT, APIC_LVT_TIMER_ONESHOT | APIC_LVT_MASKED);
+
+ for (i = 0; i < ARRAY_SIZE(tdcrs); i++) {
+ apic_write_reg(APIC_TDCR, tdcrs[i].tdcr);
+ apic_write_reg(APIC_TMICT, tmict);
+
+ tsc0 = rdtsc();
+ udelay(delay_ms * 1000);
+ tmcct = apic_read_reg(APIC_TMCCT);
+ tsc1 = rdtsc();
+
+ /*
+ * Stop the timer _after_ reading the current, final count, as
+ * writing the initial counter also modifies the current count.
+ */
+ apic_write_reg(APIC_TMICT, 0);
+
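+ /*
+ * Elapsed bus cycles = (tmict - tmcct) * divide_count, and elapsed
+ * time = (tsc1 - tsc0) / tsc_hz, hence the measured frequency below.
+ */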
+ freq = (tmict - tmcct) * tdcrs[i].divide_count * tsc_hz / (tsc1 - tsc0);
+ /* Check if measured frequency is within 5% of configured frequency. */
+ __GUEST_ASSERT(freq < apic_hz * 105 / 100 && freq > apic_hz * 95 / 100,
+ "Frequency = %lu (wanted %lu - %lu), bus = %lu, div = %u, tsc = %lu",
+ freq, apic_hz * 95 / 100, apic_hz * 105 / 100,
+ apic_hz, tdcrs[i].divide_count, tsc_hz);
+ }
+
+ GUEST_DONE();
+}
+
+static void test_apic_bus_clock(struct kvm_vcpu *vcpu)
+{
+ bool done = false;
+ struct ucall uc;
+
+ while (!done) {
+ vcpu_run(vcpu);
+
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ done = true;
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ break;
+ }
+ }
+}
+
+static void run_apic_bus_clock_test(uint64_t apic_hz, uint64_t delay_ms,
+ bool x2apic)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int ret;
+
+ is_x2apic = x2apic;
+
+ vm = vm_create(1);
+
+ sync_global_to_guest(vm, is_x2apic);
+
+ vm_enable_cap(vm, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
+ NSEC_PER_SEC / apic_hz);
+
+ vcpu = vm_vcpu_add(vm, 0, apic_guest_code);
+ vcpu_args_set(vcpu, 2, apic_hz, delay_ms);
+
+ ret = __vm_enable_cap(vm, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
+ NSEC_PER_SEC / apic_hz);
+ TEST_ASSERT(ret < 0 && errno == EINVAL,
+ "Setting of APIC bus frequency after vCPU is created should fail.");
+
+ if (!is_x2apic)
+ virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
+
+ test_apic_bus_clock(vcpu);
+ kvm_vm_free(vm);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-d delay] [-f APIC bus freq]\n", name);
+ puts("");
+ printf("-d: Delay (in msec) guest uses to measure APIC bus frequency.\n");
+ printf("-f: The APIC bus frequency (in MHz) to be configured for the guest.\n");
+ puts("");
+}
+
+int main(int argc, char *argv[])
+{
+ /*
+ * Arbitrarily default to 25MHz for the APIC bus frequency, which is
+ * different enough from the default 1GHz to be interesting.
+ */
+ uint64_t apic_hz = 25 * 1000 * 1000;
+ uint64_t delay_ms = 100;
+ int opt;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_APIC_BUS_CYCLES_NS));
+
+ while ((opt = getopt(argc, argv, "d:f:h")) != -1) {
+ switch (opt) {
+ case 'f':
+ apic_hz = atoi_positive("APIC bus frequency", optarg) * 1000 * 1000;
+ break;
+ case 'd':
+ delay_ms = atoi_positive("Delay in milliseconds", optarg);
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ exit(KSFT_SKIP);
+ }
+ }
+
+ run_apic_bus_clock_test(apic_hz, delay_ms, false);
+ run_apic_bus_clock_test(apic_hz, delay_ms, true);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c b/tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c
index 3cc4b86832fe..7e2bfb3c3f3b 100644
--- a/tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c
+++ b/tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c
@@ -26,19 +26,37 @@ int main(int argc, char *argv[])
TEST_ASSERT(ret < 0,
"Setting KVM_CAP_MAX_VCPU_ID beyond KVM cap should fail");
+ /* Test BOOT_CPU_ID interaction (MAX_VCPU_ID cannot be lower) */
+ if (kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID)) {
+ vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)MAX_VCPU_ID);
+
+ /* Try setting KVM_CAP_MAX_VCPU_ID below BOOT_CPU_ID */
+ ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID - 1);
+ TEST_ASSERT(ret < 0,
+ "Setting KVM_CAP_MAX_VCPU_ID below BOOT_CPU_ID should fail");
+ }
+
/* Set KVM_CAP_MAX_VCPU_ID */
vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID);
-
/* Try to set KVM_CAP_MAX_VCPU_ID again */
ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID + 1);
TEST_ASSERT(ret < 0,
"Setting KVM_CAP_MAX_VCPU_ID multiple times should fail");
- /* Create vCPU with id beyond KVM_CAP_MAX_VCPU_ID cap*/
+ /* Create vCPU with id beyond KVM_CAP_MAX_VCPU_ID cap */
ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)MAX_VCPU_ID);
TEST_ASSERT(ret < 0, "Creating vCPU with ID > MAX_VCPU_ID should fail");
+ /* Create vCPU with bits 63:32 != 0, but an otherwise valid id */
+ ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(1L << 32));
+ TEST_ASSERT(ret < 0, "Creating vCPU with ID[63:32] != 0 should fail");
+
+ /* Create vCPU with id within bounds */
+ ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)0);
+ TEST_ASSERT(ret >= 0, "Creating vCPU with ID 0 should succeed");
+
+ close(ret);
kvm_vm_free(vm);
return 0;
}
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index 96446134c00b..698cb36989db 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -7,15 +7,28 @@
#include "pmu.h"
#include "processor.h"
-/* Number of LOOP instructions for the guest measurement payload. */
-#define NUM_BRANCHES 10
+/* Number of iterations of the loop for the guest measurement payload. */
+#define NUM_LOOPS 10
+
+/* Each iteration of the loop retires one branch instruction. */
+#define NUM_BRANCH_INSNS_RETIRED (NUM_LOOPS)
+
+/*
+ * Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
+ * 1 LOOP.
+ */
+#define NUM_INSNS_PER_LOOP 3
+
/*
* Number of "extra" instructions that will be counted, i.e. the number of
- * instructions that are needed to set up the loop and then disabled the
- * counter. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
+ * instructions that are needed to set up the loop and then disable the
+ * counter. 2 MOV, 2 XOR, 1 WRMSR.
*/
-#define NUM_EXTRA_INSNS 7
-#define NUM_INSNS_RETIRED (NUM_BRANCHES + NUM_EXTRA_INSNS)
+#define NUM_EXTRA_INSNS 5
+
+/* Total number of instructions retired within the measured section. */
+#define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
+
static uint8_t kvm_pmu_version;
static bool kvm_has_perf_caps;
@@ -100,7 +113,7 @@ static void guest_assert_event_count(uint8_t idx,
GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
break;
case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
- GUEST_ASSERT_EQ(count, NUM_BRANCHES);
+ GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
break;
case INTEL_ARCH_LLC_REFERENCES_INDEX:
case INTEL_ARCH_LLC_MISSES_INDEX:
@@ -120,7 +133,7 @@ static void guest_assert_event_count(uint8_t idx,
}
sanity_checks:
- __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+ __asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
GUEST_ASSERT_EQ(_rdpmc(pmc), count);
wrmsr(pmc_msr, 0xdead);
@@ -134,8 +147,8 @@ sanity_checks:
* before the end of the sequence.
*
* If CLFUSH{,OPT} is supported, flush the cacheline containing (at least) the
- * start of the loop to force LLC references and misses, i.e. to allow testing
- * that those events actually count.
+ * CLFUSH{,OPT} instruction on each loop iteration to force LLC references and
+ * misses, i.e. to allow testing that those events actually count.
*
* If forced emulation is enabled (and specified), force emulation on a subset
* of the measured code to verify that KVM correctly emulates instructions and
@@ -145,10 +158,11 @@ sanity_checks:
#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP) \
do { \
__asm__ __volatile__("wrmsr\n\t" \
+ " mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \
+ "1:\n\t" \
clflush "\n\t" \
"mfence\n\t" \
- "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" \
- FEP "loop .\n\t" \
+ FEP "loop 1b\n\t" \
FEP "mov %%edi, %%ecx\n\t" \
FEP "xor %%eax, %%eax\n\t" \
FEP "xor %%edx, %%edx\n\t" \
@@ -163,9 +177,9 @@ do { \
wrmsr(pmc_msr, 0); \
\
if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) \
- GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP); \
+ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt .", FEP); \
else if (this_cpu_has(X86_FEATURE_CLFLUSH)) \
- GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP); \
+ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush .", FEP); \
else \
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP); \
\
@@ -500,7 +514,7 @@ static void guest_test_fixed_counters(void)
wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
- __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+ __asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i);
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 26b3e7efe5dd..c15513cd74d1 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -32,8 +32,8 @@ struct __kvm_pmu_event_filter {
/*
* This event list comprises Intel's known architectural events, plus AMD's
- * "retired branch instructions" for Zen1-Zen3 (and* possibly other AMD CPUs).
- * Note, AMD and Intel use the same encoding for instructions retired.
+ * Branch Instructions Retired for Zen CPUs. Note, AMD and Intel use the
+ * same encoding for Instructions Retired.
*/
kvm_static_assert(INTEL_ARCH_INSTRUCTIONS_RETIRED == AMD_ZEN_INSTRUCTIONS_RETIRED);
@@ -353,38 +353,13 @@ static bool use_intel_pmu(void)
kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
}
-static bool is_zen1(uint32_t family, uint32_t model)
-{
- return family == 0x17 && model <= 0x0f;
-}
-
-static bool is_zen2(uint32_t family, uint32_t model)
-{
- return family == 0x17 && model >= 0x30 && model <= 0x3f;
-}
-
-static bool is_zen3(uint32_t family, uint32_t model)
-{
- return family == 0x19 && model <= 0x0f;
-}
-
/*
- * Determining AMD support for a PMU event requires consulting the AMD
- * PPR for the CPU or reference material derived therefrom. The AMD
- * test code herein has been verified to work on Zen1, Zen2, and Zen3.
- *
- * Feel free to add more AMD CPUs that are documented to support event
- * select 0xc2 umask 0 as "retired branch instructions."
+ * On AMD, all Family 17h+ CPUs (Zen and its successors) use event encoding
+ * 0xc2,0 for Branch Instructions Retired.
*/
static bool use_amd_pmu(void)
{
- uint32_t family = kvm_cpu_family();
- uint32_t model = kvm_cpu_model();
-
- return host_cpu_is_amd &&
- (is_zen1(family, model) ||
- is_zen2(family, model) ||
- is_zen3(family, model));
+ return host_cpu_is_amd && kvm_cpu_family() >= 0x17;
}
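The simplified use_amd_pmu() above only needs the CPU family, relying on the fact that every Family 17h+ part uses event select 0xc2, umask 0. For reference, a hedged standalone sketch of how that family value is derived from CPUID leaf 1 (Zen parts report base family 0xF, so the extended family field must be added); the selftest itself uses its kvm_cpu_family() helper and additionally checks host_cpu_is_amd, which this sketch omits:

#include <cpuid.h>
#include <stdio.h>

static unsigned int cpu_family(void)
{
	unsigned int eax, ebx, ecx, edx, family;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 0;

	family = (eax >> 8) & 0xf;
	if (family == 0xf)
		family += (eax >> 20) & 0xff;	/* 0xf + ext. family -> 0x17, 0x19, ... */
	return family;
}

int main(void)
{
	unsigned int family = cpu_family();

	printf("family 0x%x, Zen-style event encoding assumed: %s\n",
	       family, family >= 0x17 ? "yes" : "no");
	return 0;
}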
/*
diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
index d691d86e5bc3..49913784bc82 100644
--- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
+++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
@@ -33,6 +33,20 @@ static void guest_not_bsp_vcpu(void *arg)
GUEST_DONE();
}
+static void test_set_invalid_bsp(struct kvm_vm *vm)
+{
+ unsigned long max_vcpu_id = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);
+ int r;
+
+ if (max_vcpu_id) {
+ r = __vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(max_vcpu_id + 1));
+ TEST_ASSERT(r == -1 && errno == EINVAL, "BSP with ID > MAX should fail");
+ }
+
+ r = __vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(1L << 32));
+ TEST_ASSERT(r == -1 && errno == EINVAL, "BSP with ID[63:32]!=0 should fail");
+}
+
static void test_set_bsp_busy(struct kvm_vcpu *vcpu, const char *msg)
{
int r = __vm_ioctl(vcpu->vm, KVM_SET_BOOT_CPU_ID,
@@ -80,6 +94,8 @@ static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id,
vm = vm_create(nr_vcpus);
+ test_set_invalid_bsp(vm);
+
vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(unsigned long)bsp_vcpu_id);
for (i = 0; i < nr_vcpus; i++)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 429535816dbd..d6edcfcb5be8 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -38,6 +38,14 @@ else
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
endif # CROSS_COMPILE
+# gcc defaults to silence (off) for the following warnings, but clang defaults
+# to the opposite. The warnings are not useful for the kernel itself, which is
+# why they have remained disabled in gcc for the main kernel build. It is only
+# because the selftests include kernel data structures that we get these
+# warnings from clang, so disable them for clang builds.
+CFLAGS += -Wno-address-of-packed-member
+CFLAGS += -Wno-gnu-variable-sized-type-not-at-end
+
CC := $(CLANG) $(CLANG_FLAGS) -fintegrated-as
else
CC := $(CROSS_COMPILE)gcc
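A hedged illustration of the class of code that triggers -Waddress-of-packed-member once kernel data structures are pulled into the selftests (hypothetical struct, not taken from a kernel header); clang warns by default because the resulting pointer may be misaligned, while gcc stays silent:

/* Hypothetical packed structure, similar in shape to many UAPI structs. */
struct msg {
	char type;
	int payload;
} __attribute__((packed));

int *payload_ptr(struct msg *m)
{
	return &m->payload;	/* clang: taking address of packed member may be unaligned */
}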
@@ -188,6 +196,9 @@ endef
clean: $(if $(TEST_GEN_MODS_DIR),clean_mods_dir)
$(CLEAN)
+# Build with _GNU_SOURCE by default
+CFLAGS += -D_GNU_SOURCE=
+
# Enables to extend CFLAGS and LDFLAGS from command line, e.g.
# make USERCFLAGS=-Werror USERLDFLAGS=-static
CFLAGS += $(USERCFLAGS)
diff --git a/tools/testing/selftests/livepatch/test-livepatch.sh b/tools/testing/selftests/livepatch/test-livepatch.sh
index e3455a6b1158..65c9c058458d 100755
--- a/tools/testing/selftests/livepatch/test-livepatch.sh
+++ b/tools/testing/selftests/livepatch/test-livepatch.sh
@@ -4,7 +4,9 @@
. $(dirname $0)/functions.sh
-MOD_LIVEPATCH=test_klp_livepatch
+MOD_LIVEPATCH1=test_klp_livepatch
+MOD_LIVEPATCH2=test_klp_syscall
+MOD_LIVEPATCH3=test_klp_callbacks_demo
MOD_REPLACE=test_klp_atomic_replace
setup_config
@@ -16,33 +18,33 @@ setup_config
start_test "basic function patching"
-load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH1
-if [[ "$(cat /proc/cmdline)" != "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+if [[ "$(cat /proc/cmdline)" != "$MOD_LIVEPATCH1: this has been live patched" ]] ; then
echo -e "FAIL\n\n"
die "livepatch kselftest(s) failed"
fi
-disable_lp $MOD_LIVEPATCH
-unload_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH1
+unload_lp $MOD_LIVEPATCH1
-if [[ "$(cat /proc/cmdline)" == "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+if [[ "$(cat /proc/cmdline)" == "$MOD_LIVEPATCH1: this has been live patched" ]] ; then
echo -e "FAIL\n\n"
die "livepatch kselftest(s) failed"
fi
-check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
-livepatch: enabling patch '$MOD_LIVEPATCH'
-livepatch: '$MOD_LIVEPATCH': initializing patching transition
-livepatch: '$MOD_LIVEPATCH': starting patching transition
-livepatch: '$MOD_LIVEPATCH': completing patching transition
-livepatch: '$MOD_LIVEPATCH': patching complete
-% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
-livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
-livepatch: '$MOD_LIVEPATCH': starting unpatching transition
-livepatch: '$MOD_LIVEPATCH': completing unpatching transition
-livepatch: '$MOD_LIVEPATCH': unpatching complete
-% rmmod $MOD_LIVEPATCH"
+check_result "% insmod test_modules/$MOD_LIVEPATCH1.ko
+livepatch: enabling patch '$MOD_LIVEPATCH1'
+livepatch: '$MOD_LIVEPATCH1': initializing patching transition
+livepatch: '$MOD_LIVEPATCH1': starting patching transition
+livepatch: '$MOD_LIVEPATCH1': completing patching transition
+livepatch: '$MOD_LIVEPATCH1': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH1/enabled
+livepatch: '$MOD_LIVEPATCH1': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH1': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH1': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH1': unpatching complete
+% rmmod $MOD_LIVEPATCH1"
# - load a livepatch that modifies the output from /proc/cmdline and
@@ -53,7 +55,7 @@ livepatch: '$MOD_LIVEPATCH': unpatching complete
start_test "multiple livepatches"
-load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH1
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
@@ -69,26 +71,26 @@ unload_lp $MOD_REPLACE
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-disable_lp $MOD_LIVEPATCH
-unload_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH1
+unload_lp $MOD_LIVEPATCH1
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
-livepatch: enabling patch '$MOD_LIVEPATCH'
-livepatch: '$MOD_LIVEPATCH': initializing patching transition
-livepatch: '$MOD_LIVEPATCH': starting patching transition
-livepatch: '$MOD_LIVEPATCH': completing patching transition
-livepatch: '$MOD_LIVEPATCH': patching complete
-$MOD_LIVEPATCH: this has been live patched
+check_result "% insmod test_modules/$MOD_LIVEPATCH1.ko
+livepatch: enabling patch '$MOD_LIVEPATCH1'
+livepatch: '$MOD_LIVEPATCH1': initializing patching transition
+livepatch: '$MOD_LIVEPATCH1': starting patching transition
+livepatch: '$MOD_LIVEPATCH1': completing patching transition
+livepatch: '$MOD_LIVEPATCH1': patching complete
+$MOD_LIVEPATCH1: this has been live patched
% insmod test_modules/$MOD_REPLACE.ko replace=0
livepatch: enabling patch '$MOD_REPLACE'
livepatch: '$MOD_REPLACE': initializing patching transition
livepatch: '$MOD_REPLACE': starting patching transition
livepatch: '$MOD_REPLACE': completing patching transition
livepatch: '$MOD_REPLACE': patching complete
-$MOD_LIVEPATCH: this has been live patched
+$MOD_LIVEPATCH1: this has been live patched
$MOD_REPLACE: this has been live patched
% echo 0 > /sys/kernel/livepatch/$MOD_REPLACE/enabled
livepatch: '$MOD_REPLACE': initializing unpatching transition
@@ -96,35 +98,57 @@ livepatch: '$MOD_REPLACE': starting unpatching transition
livepatch: '$MOD_REPLACE': completing unpatching transition
livepatch: '$MOD_REPLACE': unpatching complete
% rmmod $MOD_REPLACE
-$MOD_LIVEPATCH: this has been live patched
-% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
-livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
-livepatch: '$MOD_LIVEPATCH': starting unpatching transition
-livepatch: '$MOD_LIVEPATCH': completing unpatching transition
-livepatch: '$MOD_LIVEPATCH': unpatching complete
-% rmmod $MOD_LIVEPATCH"
+$MOD_LIVEPATCH1: this has been live patched
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH1/enabled
+livepatch: '$MOD_LIVEPATCH1': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH1': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH1': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH1': unpatching complete
+% rmmod $MOD_LIVEPATCH1"
# - load a livepatch that modifies the output from /proc/cmdline and
# verify correct behavior
-# - load an atomic replace livepatch and verify that only the second is active
-# - remove the first livepatch and verify that the atomic replace livepatch
-# is still active
+# - load two additional livepatches and check the number of livepatch modules
+# applied
+# - load an atomic replace livepatch and check that the other three modules were
+# disabled
+# - remove all livepatches besides the atomic replace one and verify that the
+# atomic replace livepatch is still active
# - remove the atomic replace livepatch and verify that none are active
start_test "atomic replace livepatch"
-load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH1
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
+for mod in $MOD_LIVEPATCH2 $MOD_LIVEPATCH3; do
+ load_lp "$mod"
+done
+
+mods=(/sys/kernel/livepatch/*)
+nmods=${#mods[@]}
+if [ "$nmods" -ne 3 ]; then
+ die "Expecting three modules listed, found $nmods"
+fi
+
load_lp $MOD_REPLACE replace=1
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-unload_lp $MOD_LIVEPATCH
+mods=(/sys/kernel/livepatch/*)
+nmods=${#mods[@]}
+if [ "$nmods" -ne 1 ]; then
+	die "Expecting only one module listed, found $nmods"
+fi
+
+# These modules were disabled by the atomic replace
+for mod in $MOD_LIVEPATCH3 $MOD_LIVEPATCH2 $MOD_LIVEPATCH1; do
+ unload_lp "$mod"
+done
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
@@ -135,13 +159,27 @@ unload_lp $MOD_REPLACE
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-check_result "% insmod test_modules/$MOD_LIVEPATCH.ko
-livepatch: enabling patch '$MOD_LIVEPATCH'
-livepatch: '$MOD_LIVEPATCH': initializing patching transition
-livepatch: '$MOD_LIVEPATCH': starting patching transition
-livepatch: '$MOD_LIVEPATCH': completing patching transition
-livepatch: '$MOD_LIVEPATCH': patching complete
-$MOD_LIVEPATCH: this has been live patched
+check_result "% insmod test_modules/$MOD_LIVEPATCH1.ko
+livepatch: enabling patch '$MOD_LIVEPATCH1'
+livepatch: '$MOD_LIVEPATCH1': initializing patching transition
+livepatch: '$MOD_LIVEPATCH1': starting patching transition
+livepatch: '$MOD_LIVEPATCH1': completing patching transition
+livepatch: '$MOD_LIVEPATCH1': patching complete
+$MOD_LIVEPATCH1: this has been live patched
+% insmod test_modules/$MOD_LIVEPATCH2.ko
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% insmod test_modules/$MOD_LIVEPATCH3.ko
+livepatch: enabling patch '$MOD_LIVEPATCH3'
+livepatch: '$MOD_LIVEPATCH3': initializing patching transition
+$MOD_LIVEPATCH3: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH3': starting patching transition
+livepatch: '$MOD_LIVEPATCH3': completing patching transition
+$MOD_LIVEPATCH3: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH3': patching complete
% insmod test_modules/$MOD_REPLACE.ko replace=1
livepatch: enabling patch '$MOD_REPLACE'
livepatch: '$MOD_REPLACE': initializing patching transition
@@ -149,7 +187,9 @@ livepatch: '$MOD_REPLACE': starting patching transition
livepatch: '$MOD_REPLACE': completing patching transition
livepatch: '$MOD_REPLACE': patching complete
$MOD_REPLACE: this has been live patched
-% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_LIVEPATCH3
+% rmmod $MOD_LIVEPATCH2
+% rmmod $MOD_LIVEPATCH1
$MOD_REPLACE: this has been live patched
% echo 0 > /sys/kernel/livepatch/$MOD_REPLACE/enabled
livepatch: '$MOD_REPLACE': initializing unpatching transition
diff --git a/tools/testing/selftests/livepatch/test-syscall.sh b/tools/testing/selftests/livepatch/test-syscall.sh
index b76a881d4013..289eb7d4c4b3 100755
--- a/tools/testing/selftests/livepatch/test-syscall.sh
+++ b/tools/testing/selftests/livepatch/test-syscall.sh
@@ -15,7 +15,10 @@ setup_config
start_test "patch getpid syscall while being heavily hammered"
-for i in $(seq 1 $(getconf _NPROCESSORS_ONLN)); do
+NPROC=$(getconf _NPROCESSORS_ONLN)
+MAXPROC=128
+
+for i in $(seq 1 $(($NPROC < $MAXPROC ? $NPROC : $MAXPROC))); do
./test_klp-call_getpid &
pids[$i]="$!"
done
diff --git a/tools/testing/selftests/livepatch/test-sysfs.sh b/tools/testing/selftests/livepatch/test-sysfs.sh
index 6c646afa7395..05a14f5a7bfb 100755
--- a/tools/testing/selftests/livepatch/test-sysfs.sh
+++ b/tools/testing/selftests/livepatch/test-sysfs.sh
@@ -18,6 +18,7 @@ check_sysfs_rights "$MOD_LIVEPATCH" "" "drwxr-xr-x"
check_sysfs_rights "$MOD_LIVEPATCH" "enabled" "-rw-r--r--"
check_sysfs_value "$MOD_LIVEPATCH" "enabled" "1"
check_sysfs_rights "$MOD_LIVEPATCH" "force" "--w-------"
+check_sysfs_rights "$MOD_LIVEPATCH" "replace" "-r--r--r--"
check_sysfs_rights "$MOD_LIVEPATCH" "transition" "-r--r--r--"
check_sysfs_value "$MOD_LIVEPATCH" "transition" "0"
check_sysfs_rights "$MOD_LIVEPATCH" "vmlinux/patched" "-r--r--r--"
@@ -83,4 +84,51 @@ test_klp_callbacks_demo: post_unpatch_callback: vmlinux
livepatch: 'test_klp_callbacks_demo': unpatching complete
% rmmod test_klp_callbacks_demo"
+start_test "sysfs test replace enabled"
+
+MOD_LIVEPATCH=test_klp_atomic_replace
+load_lp $MOD_LIVEPATCH replace=1
+
+check_sysfs_rights "$MOD_LIVEPATCH" "replace" "-r--r--r--"
+check_sysfs_value "$MOD_LIVEPATCH" "replace" "1"
+
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% insmod test_modules/$MOD_LIVEPATCH.ko replace=1
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+start_test "sysfs test replace disabled"
+
+load_lp $MOD_LIVEPATCH replace=0
+
+check_sysfs_rights "$MOD_LIVEPATCH" "replace" "-r--r--r--"
+check_sysfs_value "$MOD_LIVEPATCH" "replace" "0"
+
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% insmod test_modules/$MOD_LIVEPATCH.ko replace=0
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
exit 0
diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
index 368973f05250..cff124c1eddd 100644
--- a/tools/testing/selftests/lkdtm/tests.txt
+++ b/tools/testing/selftests/lkdtm/tests.txt
@@ -31,6 +31,7 @@ SLAB_FREE_CROSS
SLAB_FREE_PAGE
#SOFTLOCKUP Hangs the system
#HARDLOCKUP Hangs the system
+#SMP_CALL_LOCKUP Hangs the system
#SPINLOCKUP Hangs the system
#HUNG_TASK Hangs the system
EXEC_DATA
diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore
index 0b9ab987601c..064e7b125643 100644
--- a/tools/testing/selftests/mm/.gitignore
+++ b/tools/testing/selftests/mm/.gitignore
@@ -6,6 +6,7 @@ hugepage-shm
hugepage-vmemmap
hugetlb-madvise
hugetlb-read-hwpoison
+hugetlb-soft-offline
khugepaged
map_hugetlb
map_populate
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index 3b49bc3d0a3b..e1aa09ddaa3d 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -2,6 +2,7 @@
# Makefile for mm selftests
LOCAL_HDRS += $(selfdir)/mm/local_config.h $(top_srcdir)/mm/gup_test.h
+LOCAL_HDRS += $(selfdir)/mm/mseal_helpers.h
include local_config.mk
@@ -42,6 +43,7 @@ TEST_GEN_FILES += gup_test
TEST_GEN_FILES += hmm-tests
TEST_GEN_FILES += hugetlb-madvise
TEST_GEN_FILES += hugetlb-read-hwpoison
+TEST_GEN_FILES += hugetlb-soft-offline
TEST_GEN_FILES += hugepage-mmap
TEST_GEN_FILES += hugepage-mremap
TEST_GEN_FILES += hugepage-shm
@@ -73,6 +75,7 @@ TEST_GEN_FILES += ksm_functional_tests
TEST_GEN_FILES += mdwe_test
TEST_GEN_FILES += hugetlb_fault_after_madv
TEST_GEN_FILES += hugetlb_madv_vs_map
+TEST_GEN_FILES += hugetlb_dio
ifneq ($(ARCH),arm64)
TEST_GEN_FILES += soft-dirty
diff --git a/tools/testing/selftests/mm/hugepage-mremap.c b/tools/testing/selftests/mm/hugepage-mremap.c
index c463d1c09c9b..ada9156cc497 100644
--- a/tools/testing/selftests/mm/hugepage-mremap.c
+++ b/tools/testing/selftests/mm/hugepage-mremap.c
@@ -15,7 +15,7 @@
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
-#include <unistd.h>
+#include <asm-generic/unistd.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h> /* Definition of O_* constants */
diff --git a/tools/testing/selftests/mm/hugetlb-soft-offline.c b/tools/testing/selftests/mm/hugetlb-soft-offline.c
new file mode 100644
index 000000000000..f086f0e04756
--- /dev/null
+++ b/tools/testing/selftests/mm/hugetlb-soft-offline.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test soft offline behavior for HugeTLB pages:
+ * - if enable_soft_offline = 0, hugepages should stay intact and soft
+ *   offlining should fail with EOPNOTSUPP.
+ * - if enable_soft_offline = 1, a hugepage should be dissolved and
+ * nr_hugepages/free_hugepages should be reduced by 1.
+ *
+ * Before running, make sure more than 2 hugepages of default_hugepagesz
+ * are allocated. For example, if Hugepagesize in /proc/meminfo is 2048kB:
+ * echo 8 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <linux/magic.h>
+#include <linux/memfd.h>
+#include <sys/mman.h>
+#include <sys/statfs.h>
+#include <sys/types.h>
+
+#include "../kselftest.h"
+
+#ifndef MADV_SOFT_OFFLINE
+#define MADV_SOFT_OFFLINE 101
+#endif
+
+#define EPREFIX " !!! "
+
+static int do_soft_offline(int fd, size_t len, int expect_errno)
+{
+ char *filemap = NULL;
+ char *hwp_addr = NULL;
+ const unsigned long pagesize = getpagesize();
+ int ret = 0;
+
+ if (ftruncate(fd, len) < 0) {
+ ksft_perror(EPREFIX "ftruncate to len failed");
+ return -1;
+ }
+
+ filemap = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, 0);
+ if (filemap == MAP_FAILED) {
+ ksft_perror(EPREFIX "mmap failed");
+ ret = -1;
+ goto untruncate;
+ }
+
+ memset(filemap, 0xab, len);
+ ksft_print_msg("Allocated %#lx bytes of hugetlb pages\n", len);
+
+ hwp_addr = filemap + len / 2;
+ ret = madvise(hwp_addr, pagesize, MADV_SOFT_OFFLINE);
+ ksft_print_msg("MADV_SOFT_OFFLINE %p ret=%d, errno=%d\n",
+ hwp_addr, ret, errno);
+ if (ret != 0)
+ ksft_perror(EPREFIX "madvise failed");
+
+ if (errno == expect_errno)
+ ret = 0;
+ else {
+ ksft_print_msg("MADV_SOFT_OFFLINE should ret %d\n",
+ expect_errno);
+ ret = -1;
+ }
+
+ munmap(filemap, len);
+untruncate:
+ if (ftruncate(fd, 0) < 0)
+ ksft_perror(EPREFIX "ftruncate back to 0 failed");
+
+ return ret;
+}
+
+static int set_enable_soft_offline(int value)
+{
+ char cmd[256] = {0};
+ FILE *cmdfile = NULL;
+
+ if (value != 0 && value != 1)
+ return -EINVAL;
+
+ sprintf(cmd, "echo %d > /proc/sys/vm/enable_soft_offline", value);
+ cmdfile = popen(cmd, "r");
+
+ if (cmdfile)
+ ksft_print_msg("enable_soft_offline => %d\n", value);
+ else {
+ ksft_perror(EPREFIX "failed to set enable_soft_offline");
+ return errno;
+ }
+
+ pclose(cmdfile);
+ return 0;
+}
+
+static int read_nr_hugepages(unsigned long hugepage_size,
+ unsigned long *nr_hugepages)
+{
+ char buffer[256] = {0};
+ char cmd[256] = {0};
+
+ sprintf(cmd, "cat /sys/kernel/mm/hugepages/hugepages-%ldkB/nr_hugepages",
+ hugepage_size);
+ FILE *cmdfile = popen(cmd, "r");
+
+ if (cmdfile == NULL) {
+ ksft_perror(EPREFIX "failed to popen nr_hugepages");
+ return -1;
+ }
+
+ if (!fgets(buffer, sizeof(buffer), cmdfile)) {
+ ksft_perror(EPREFIX "failed to read nr_hugepages");
+ pclose(cmdfile);
+ return -1;
+ }
+
+ *nr_hugepages = atoll(buffer);
+ pclose(cmdfile);
+ return 0;
+}
+
+static int create_hugetlbfs_file(struct statfs *file_stat)
+{
+ int fd;
+
+ fd = memfd_create("hugetlb_tmp", MFD_HUGETLB);
+ if (fd < 0) {
+ ksft_perror(EPREFIX "could not open hugetlbfs file");
+ return -1;
+ }
+
+ memset(file_stat, 0, sizeof(*file_stat));
+ if (fstatfs(fd, file_stat)) {
+ ksft_perror(EPREFIX "fstatfs failed");
+ goto close;
+ }
+ if (file_stat->f_type != HUGETLBFS_MAGIC) {
+ ksft_print_msg(EPREFIX "not hugetlbfs file\n");
+ goto close;
+ }
+
+ return fd;
+close:
+ close(fd);
+ return -1;
+}
+
+static void test_soft_offline_common(int enable_soft_offline)
+{
+ int fd;
+ int expect_errno = enable_soft_offline ? 0 : EOPNOTSUPP;
+ struct statfs file_stat;
+ unsigned long hugepagesize_kb = 0;
+ unsigned long nr_hugepages_before = 0;
+ unsigned long nr_hugepages_after = 0;
+ int ret;
+
+	ksft_print_msg("Test soft-offline when enable_soft_offline=%d\n",
+ enable_soft_offline);
+
+ fd = create_hugetlbfs_file(&file_stat);
+ if (fd < 0)
+ ksft_exit_fail_msg("Failed to create hugetlbfs file\n");
+
+ hugepagesize_kb = file_stat.f_bsize / 1024;
+ ksft_print_msg("Hugepagesize is %ldkB\n", hugepagesize_kb);
+
+ if (set_enable_soft_offline(enable_soft_offline) != 0) {
+ close(fd);
+ ksft_exit_fail_msg("Failed to set enable_soft_offline\n");
+ }
+
+ if (read_nr_hugepages(hugepagesize_kb, &nr_hugepages_before) != 0) {
+ close(fd);
+ ksft_exit_fail_msg("Failed to read nr_hugepages\n");
+ }
+
+ ksft_print_msg("Before MADV_SOFT_OFFLINE nr_hugepages=%ld\n",
+ nr_hugepages_before);
+
+ ret = do_soft_offline(fd, 2 * file_stat.f_bsize, expect_errno);
+
+ if (read_nr_hugepages(hugepagesize_kb, &nr_hugepages_after) != 0) {
+ close(fd);
+ ksft_exit_fail_msg("Failed to read nr_hugepages\n");
+ }
+
+ ksft_print_msg("After MADV_SOFT_OFFLINE nr_hugepages=%ld\n",
+ nr_hugepages_after);
+
+ // No need for the hugetlbfs file from now on.
+ close(fd);
+
+ if (enable_soft_offline) {
+ if (nr_hugepages_before != nr_hugepages_after + 1) {
+			ksft_test_result_fail("MADV_SOFT_OFFLINE should have reduced nr_hugepages by 1\n");
+ return;
+ }
+ } else {
+ if (nr_hugepages_before != nr_hugepages_after) {
+ ksft_test_result_fail("MADV_SOFT_OFFLINE reduced %lu hugepages\n",
+ nr_hugepages_before - nr_hugepages_after);
+ return;
+ }
+ }
+
+ ksft_test_result(ret == 0,
+			 "Test soft-offline when enable_soft_offline=%d\n",
+ enable_soft_offline);
+}
+
+int main(int argc, char **argv)
+{
+ ksft_print_header();
+ ksft_set_plan(2);
+
+ test_soft_offline_common(1);
+ test_soft_offline_common(0);
+
+ ksft_finished();
+}
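Distilled from the test above, a minimal sketch of the sequence being exercised: map a hugetlb memfd, populate it, and request soft offline of one base page with madvise(MADV_SOFT_OFFLINE). Error handling is trimmed and a 2 MiB default hugepage size plus root privileges are assumed; whether the call succeeds or fails with EOPNOTSUPP is controlled by /proc/sys/vm/enable_soft_offline:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <linux/memfd.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101
#endif

int main(void)
{
	size_t len = 2UL << 20;		/* assumes one 2 MiB default hugepage */
	int fd = memfd_create("hugetlb_demo", MFD_HUGETLB);
	char *map;

	ftruncate(fd, len);
	map = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, fd, 0);
	memset(map, 0xab, len);

	if (madvise(map, getpagesize(), MADV_SOFT_OFFLINE))
		printf("soft offline rejected: %s\n", strerror(errno));
	else
		printf("hugepage soft offlined\n");

	munmap(map, len);
	close(fd);
	return 0;
}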
diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c
new file mode 100644
index 000000000000..f9ac20c657ec
--- /dev/null
+++ b/tools/testing/selftests/mm/hugetlb_dio.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program tests for hugepage leaks after DIO writes to a file using a
+ * hugepage as the user buffer. During DIO, the user buffer is pinned and
+ * should be properly unpinned upon completion. This test verifies that the
+ * kernel correctly unpins the buffer at DIO completion for both aligned and
+ * unaligned user buffer offsets (w.r.t. the page boundary), ensuring the hugepage
+ * is freed upon unmapping.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/mman.h>
+#include "vm_util.h"
+#include "../kselftest.h"
+
+void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
+{
+ int fd;
+ char *buffer = NULL;
+ char *orig_buffer = NULL;
+ size_t h_pagesize = 0;
+ size_t writesize;
+ int free_hpage_b = 0;
+ int free_hpage_a = 0;
+ const int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
+ const int mmap_prot = PROT_READ | PROT_WRITE;
+
+ writesize = end_off - start_off;
+
+ /* Get the default huge page size */
+ h_pagesize = default_huge_page_size();
+ if (!h_pagesize)
+ ksft_exit_fail_msg("Unable to determine huge page size\n");
+
+	/* Open the file for DIO */
+ fd = open("/tmp", O_TMPFILE | O_RDWR | O_DIRECT, 0664);
+ if (fd < 0)
+ ksft_exit_fail_perror("Error opening file\n");
+
+ /* Get the free huge pages before allocation */
+ free_hpage_b = get_free_hugepages();
+ if (free_hpage_b == 0) {
+ close(fd);
+ ksft_exit_skip("No free hugepage, exiting!\n");
+ }
+
+ /* Allocate a hugetlb page */
+ orig_buffer = mmap(NULL, h_pagesize, mmap_prot, mmap_flags, -1, 0);
+ if (orig_buffer == MAP_FAILED) {
+ close(fd);
+ ksft_exit_fail_perror("Error mapping memory\n");
+ }
+ buffer = orig_buffer;
+ buffer += start_off;
+
+ memset(buffer, 'A', writesize);
+
+ /* Write the buffer to the file */
+ if (write(fd, buffer, writesize) != (writesize)) {
+ munmap(orig_buffer, h_pagesize);
+ close(fd);
+ ksft_exit_fail_perror("Error writing to file\n");
+ }
+
+ /* unmap the huge page */
+ munmap(orig_buffer, h_pagesize);
+ close(fd);
+
+	/* Get the free huge pages after unmap */
+ free_hpage_a = get_free_hugepages();
+
+ /*
+	 * If the number of free hugepages before allocation and after unmap
+	 * does not match, that means a page could still be pinned.
+ */
+ if (free_hpage_a != free_hpage_b) {
+ ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
+ ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
+ ksft_test_result_fail(": Huge pages not freed!\n");
+ } else {
+ ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
+ ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
+		ksft_test_result_pass(": Huge pages freed successfully!\n");
+ }
+}
+
+int main(void)
+{
+ size_t pagesize = 0;
+
+ ksft_print_header();
+ ksft_set_plan(4);
+
+ /* Get base page size */
+ pagesize = psize();
+
+ /* start and end is aligned to pagesize */
+ run_dio_using_hugetlb(0, (pagesize * 3));
+
+ /* start is aligned but end is not aligned */
+ run_dio_using_hugetlb(0, (pagesize * 3) - (pagesize / 2));
+
+ /* start is unaligned and end is aligned */
+ run_dio_using_hugetlb(pagesize / 2, (pagesize * 3));
+
+ /* both start and end are unaligned */
+ run_dio_using_hugetlb(pagesize / 2, (pagesize * 3) + (pagesize / 2));
+
+ ksft_finished();
+}
diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
index b61803e36d1c..66b4e111b5a2 100644
--- a/tools/testing/selftests/mm/ksm_functional_tests.c
+++ b/tools/testing/selftests/mm/ksm_functional_tests.c
@@ -11,7 +11,7 @@
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
-#include <unistd.h>
+#include <asm-generic/unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
@@ -369,7 +369,6 @@ unmap:
munmap(map, size);
}
-#ifdef __NR_userfaultfd
static void test_unmerge_uffd_wp(void)
{
struct uffdio_writeprotect uffd_writeprotect;
@@ -430,7 +429,6 @@ close_uffd:
unmap:
munmap(map, size);
}
-#endif
/* Verify that KSM can be enabled / queried with prctl. */
static void test_prctl(void)
@@ -686,9 +684,7 @@ int main(int argc, char **argv)
exit(test_child_ksm());
}
-#ifdef __NR_userfaultfd
tests++;
-#endif
ksft_print_header();
ksft_set_plan(tests);
@@ -700,9 +696,7 @@ int main(int argc, char **argv)
test_unmerge();
test_unmerge_zero_pages();
test_unmerge_discarded();
-#ifdef __NR_userfaultfd
test_unmerge_uffd_wp();
-#endif
test_prot_none();
diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c
index 9a0597310a76..74c911aa3aea 100644
--- a/tools/testing/selftests/mm/memfd_secret.c
+++ b/tools/testing/selftests/mm/memfd_secret.c
@@ -17,7 +17,7 @@
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
+#include <asm-generic/unistd.h>
#include <errno.h>
#include <stdio.h>
#include <fcntl.h>
@@ -28,8 +28,6 @@
#define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__)
#define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__)
-#ifdef __NR_memfd_secret
-
#define PATTERN 0x55
static const int prot = PROT_READ | PROT_WRITE;
@@ -334,13 +332,3 @@ int main(int argc, char *argv[])
ksft_finished();
}
-
-#else /* __NR_memfd_secret */
-
-int main(int argc, char *argv[])
-{
- printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n");
- return KSFT_SKIP;
-}
-
-#endif /* __NR_memfd_secret */
diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c
index b8a7efe9204e..1db134063c38 100644
--- a/tools/testing/selftests/mm/mkdirty.c
+++ b/tools/testing/selftests/mm/mkdirty.c
@@ -9,7 +9,7 @@
*/
#include <fcntl.h>
#include <signal.h>
-#include <unistd.h>
+#include <asm-generic/unistd.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
@@ -265,7 +265,6 @@ munmap:
munmap(mmap_mem, mmap_size);
}
-#ifdef __NR_userfaultfd
static void test_uffdio_copy(void)
{
struct uffdio_register uffdio_register;
@@ -322,7 +321,6 @@ munmap:
munmap(dst, pagesize);
free(src);
}
-#endif /* __NR_userfaultfd */
int main(void)
{
@@ -335,9 +333,7 @@ int main(void)
thpsize / 1024);
tests += 3;
}
-#ifdef __NR_userfaultfd
tests += 1;
-#endif /* __NR_userfaultfd */
ksft_print_header();
ksft_set_plan(tests);
@@ -367,9 +363,7 @@ int main(void)
if (thpsize)
test_pte_mapped_thp();
/* Placing a fresh page via userfaultfd may set the PTE dirty. */
-#ifdef __NR_userfaultfd
test_uffdio_copy();
-#endif /* __NR_userfaultfd */
err = ksft_get_fail_cnt();
if (err)
diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h
index 4417eaa5cfb7..1e5731bab499 100644
--- a/tools/testing/selftests/mm/mlock2.h
+++ b/tools/testing/selftests/mm/mlock2.h
@@ -3,6 +3,7 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
+#include <asm-generic/unistd.h>
static int mlock2_(void *start, size_t len, int flags)
{
diff --git a/tools/testing/selftests/mm/mseal_helpers.h b/tools/testing/selftests/mm/mseal_helpers.h
new file mode 100644
index 000000000000..0cfce31c76d2
--- /dev/null
+++ b/tools/testing/selftests/mm/mseal_helpers.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define FAIL_TEST_IF_FALSE(test_passed) \
+ do { \
+ if (!(test_passed)) { \
+ ksft_test_result_fail("%s: line:%d\n", \
+ __func__, __LINE__); \
+ return; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_FALSE(test_passed) \
+ do { \
+ if (!(test_passed)) { \
+ ksft_test_result_skip("%s: line:%d\n", \
+ __func__, __LINE__); \
+ return; \
+ } \
+ } while (0)
+
+#define REPORT_TEST_PASS() ksft_test_result_pass("%s\n", __func__)
+
+#ifndef PKEY_DISABLE_ACCESS
+#define PKEY_DISABLE_ACCESS 0x1
+#endif
+
+#ifndef PKEY_DISABLE_WRITE
+#define PKEY_DISABLE_WRITE 0x2
+#endif
+
+#ifndef PKEY_BITS_PER_PKEY
+#define PKEY_BITS_PER_PKEY 2
+#endif
+
+#ifndef PKEY_MASK
+#define PKEY_MASK (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)
+#endif
+
+#ifndef u64
+#define u64 unsigned long long
+#endif
diff --git a/tools/testing/selftests/mm/mseal_test.c b/tools/testing/selftests/mm/mseal_test.c
index 41998cf1dcf5..a818f010de47 100644
--- a/tools/testing/selftests/mm/mseal_test.c
+++ b/tools/testing/selftests/mm/mseal_test.c
@@ -3,7 +3,7 @@
#include <linux/mman.h>
#include <sys/mman.h>
#include <stdint.h>
-#include <unistd.h>
+#include <asm-generic/unistd.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
@@ -17,54 +17,7 @@
#include <sys/ioctl.h>
#include <sys/vfs.h>
#include <sys/stat.h>
-
-/*
- * need those definition for manually build using gcc.
- * gcc -I ../../../../usr/include -DDEBUG -O3 -DDEBUG -O3 mseal_test.c -o mseal_test
- */
-#ifndef PKEY_DISABLE_ACCESS
-# define PKEY_DISABLE_ACCESS 0x1
-#endif
-
-#ifndef PKEY_DISABLE_WRITE
-# define PKEY_DISABLE_WRITE 0x2
-#endif
-
-#ifndef PKEY_BITS_PER_PKEY
-#define PKEY_BITS_PER_PKEY 2
-#endif
-
-#ifndef PKEY_MASK
-#define PKEY_MASK (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)
-#endif
-
-#define FAIL_TEST_IF_FALSE(c) do {\
- if (!(c)) {\
- ksft_test_result_fail("%s, line:%d\n", __func__, __LINE__);\
- goto test_end;\
- } \
- } \
- while (0)
-
-#define SKIP_TEST_IF_FALSE(c) do {\
- if (!(c)) {\
- ksft_test_result_skip("%s, line:%d\n", __func__, __LINE__);\
- goto test_end;\
- } \
- } \
- while (0)
-
-
-#define TEST_END_CHECK() {\
- ksft_test_result_pass("%s\n", __func__);\
- return;\
-test_end:\
- return;\
-}
-
-#ifndef u64
-#define u64 unsigned long long
-#endif
+#include "mseal_helpers.h"
static unsigned long get_vma_size(void *addr, int *prot)
{
@@ -287,7 +240,7 @@ static void test_seal_addseal(void)
ret = sys_mseal(ptr, size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_unmapped_start(void)
@@ -315,7 +268,7 @@ static void test_seal_unmapped_start(void)
ret = sys_mseal(ptr + 2 * page_size, 2 * page_size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_unmapped_middle(void)
@@ -347,7 +300,7 @@ static void test_seal_unmapped_middle(void)
ret = sys_mseal(ptr + 3 * page_size, page_size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_unmapped_end(void)
@@ -376,7 +329,7 @@ static void test_seal_unmapped_end(void)
ret = sys_mseal(ptr, 2 * page_size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_multiple_vmas(void)
@@ -407,7 +360,7 @@ static void test_seal_multiple_vmas(void)
ret = sys_mseal(ptr, size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_split_start(void)
@@ -432,7 +385,7 @@ static void test_seal_split_start(void)
ret = sys_mseal(ptr + page_size, 3 * page_size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_split_end(void)
@@ -457,7 +410,7 @@ static void test_seal_split_end(void)
ret = sys_mseal(ptr, 3 * page_size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_invalid_input(void)
@@ -492,7 +445,7 @@ static void test_seal_invalid_input(void)
ret = sys_mseal(ptr - page_size, 5 * page_size);
FAIL_TEST_IF_FALSE(ret < 0);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_zero_length(void)
@@ -516,7 +469,7 @@ static void test_seal_zero_length(void)
ret = sys_mprotect(ptr, size, PROT_READ | PROT_WRITE);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_zero_address(void)
@@ -542,7 +495,7 @@ static void test_seal_zero_address(void)
ret = sys_mprotect(ptr, size, PROT_READ | PROT_WRITE);
FAIL_TEST_IF_FALSE(ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_twice(void)
@@ -562,7 +515,7 @@ static void test_seal_twice(void)
ret = sys_mseal(ptr, size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect(bool seal)
@@ -586,7 +539,7 @@ static void test_seal_mprotect(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_start_mprotect(bool seal)
@@ -616,7 +569,7 @@ static void test_seal_start_mprotect(bool seal)
PROT_READ | PROT_WRITE);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_end_mprotect(bool seal)
@@ -646,7 +599,7 @@ static void test_seal_end_mprotect(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect_unalign_len(bool seal)
@@ -675,7 +628,7 @@ static void test_seal_mprotect_unalign_len(bool seal)
PROT_READ | PROT_WRITE);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect_unalign_len_variant_2(bool seal)
@@ -703,7 +656,7 @@ static void test_seal_mprotect_unalign_len_variant_2(bool seal)
PROT_READ | PROT_WRITE);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect_two_vma(bool seal)
@@ -738,7 +691,7 @@ static void test_seal_mprotect_two_vma(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect_two_vma_with_split(bool seal)
@@ -785,7 +738,7 @@ static void test_seal_mprotect_two_vma_with_split(bool seal)
PROT_READ | PROT_WRITE);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect_partial_mprotect(bool seal)
@@ -811,7 +764,7 @@ static void test_seal_mprotect_partial_mprotect(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect_two_vma_with_gap(bool seal)
@@ -854,7 +807,7 @@ static void test_seal_mprotect_two_vma_with_gap(bool seal)
ret = sys_mprotect(ptr + 3 * page_size, page_size, PROT_READ);
FAIL_TEST_IF_FALSE(ret == 0);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect_split(bool seal)
@@ -891,7 +844,7 @@ static void test_seal_mprotect_split(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mprotect_merge(bool seal)
@@ -925,7 +878,7 @@ static void test_seal_mprotect_merge(bool seal)
ret = sys_mprotect(ptr + 2 * page_size, 2 * page_size, PROT_READ);
FAIL_TEST_IF_FALSE(ret == 0);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_munmap(bool seal)
@@ -950,7 +903,7 @@ static void test_seal_munmap(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
/*
@@ -990,7 +943,7 @@ static void test_seal_munmap_two_vma(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
/*
@@ -1028,7 +981,7 @@ static void test_seal_munmap_vma_with_gap(bool seal)
ret = sys_munmap(ptr, size);
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_munmap_start_freed(bool seal)
@@ -1068,7 +1021,7 @@ static void test_munmap_start_freed(bool seal)
FAIL_TEST_IF_FALSE(size == 0);
}
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_munmap_end_freed(bool seal)
@@ -1098,7 +1051,7 @@ static void test_munmap_end_freed(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_munmap_middle_freed(bool seal)
@@ -1142,7 +1095,7 @@ static void test_munmap_middle_freed(bool seal)
FAIL_TEST_IF_FALSE(size == 0);
}
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_shrink(bool seal)
@@ -1171,7 +1124,7 @@ static void test_seal_mremap_shrink(bool seal)
}
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_expand(bool seal)
@@ -1203,7 +1156,7 @@ static void test_seal_mremap_expand(bool seal)
}
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_move(bool seal)
@@ -1236,7 +1189,7 @@ static void test_seal_mremap_move(bool seal)
}
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mmap_overwrite_prot(bool seal)
@@ -1264,7 +1217,7 @@ static void test_seal_mmap_overwrite_prot(bool seal)
} else
FAIL_TEST_IF_FALSE(ret2 == ptr);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mmap_expand(bool seal)
@@ -1295,7 +1248,7 @@ static void test_seal_mmap_expand(bool seal)
} else
FAIL_TEST_IF_FALSE(ret2 == ptr);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mmap_shrink(bool seal)
@@ -1323,7 +1276,7 @@ static void test_seal_mmap_shrink(bool seal)
} else
FAIL_TEST_IF_FALSE(ret2 == ptr);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_shrink_fixed(bool seal)
@@ -1354,7 +1307,7 @@ static void test_seal_mremap_shrink_fixed(bool seal)
} else
FAIL_TEST_IF_FALSE(ret2 == newAddr);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_expand_fixed(bool seal)
@@ -1385,7 +1338,7 @@ static void test_seal_mremap_expand_fixed(bool seal)
} else
FAIL_TEST_IF_FALSE(ret2 == newAddr);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_move_fixed(bool seal)
@@ -1415,7 +1368,7 @@ static void test_seal_mremap_move_fixed(bool seal)
} else
FAIL_TEST_IF_FALSE(ret2 == newAddr);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_move_fixed_zero(bool seal)
@@ -1447,7 +1400,7 @@ static void test_seal_mremap_move_fixed_zero(bool seal)
}
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_move_dontunmap(bool seal)
@@ -1476,7 +1429,7 @@ static void test_seal_mremap_move_dontunmap(bool seal)
}
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_mremap_move_dontunmap_anyaddr(bool seal)
@@ -1510,7 +1463,7 @@ static void test_seal_mremap_move_dontunmap_anyaddr(bool seal)
}
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
@@ -1603,7 +1556,7 @@ static void test_seal_merge_and_split(void)
FAIL_TEST_IF_FALSE(size == 22 * page_size);
FAIL_TEST_IF_FALSE(prot == 0x4);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_discard_ro_anon_on_rw(bool seal)
@@ -1632,7 +1585,7 @@ static void test_seal_discard_ro_anon_on_rw(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_discard_ro_anon_on_pkey(bool seal)
@@ -1679,7 +1632,7 @@ static void test_seal_discard_ro_anon_on_pkey(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_discard_ro_anon_on_filebacked(bool seal)
@@ -1716,7 +1669,7 @@ static void test_seal_discard_ro_anon_on_filebacked(bool seal)
FAIL_TEST_IF_FALSE(!ret);
close(fd);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_discard_ro_anon_on_shared(bool seal)
@@ -1745,7 +1698,7 @@ static void test_seal_discard_ro_anon_on_shared(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
static void test_seal_discard_ro_anon(bool seal)
@@ -1775,7 +1728,7 @@ static void test_seal_discard_ro_anon(bool seal)
else
FAIL_TEST_IF_FALSE(!ret);
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index 2d785aca72a5..fc90af2a97b8 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -15,7 +15,7 @@
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <math.h>
-#include <asm/unistd.h>
+#include <asm-generic/unistd.h>
#include <pthread.h>
#include <sys/resource.h>
#include <assert.h>
@@ -1567,8 +1567,10 @@ int main(int argc, char *argv[])
/* 7. File Hugetlb testing */
mem_size = 2*1024*1024;
fd = memfd_create("uffd-test", MFD_HUGETLB | MFD_NOEXEC_SEAL);
+ if (fd < 0)
+ ksft_exit_fail_msg("uffd-test creation failed %d %s\n", errno, strerror(errno));
mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if (mem) {
+ if (mem != MAP_FAILED) {
wp_init(mem, mem_size);
wp_addr_range(mem, mem_size);
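The hunk above fixes a common mmap() pitfall: on failure mmap() returns MAP_FAILED ((void *)-1), never NULL, so a plain truthiness check accepts failed mappings. A small hedged illustration of the bug class:

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	/* Deliberately bogus length to make mmap() fail with EINVAL. */
	void *mem = mmap(NULL, 0, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (mem != MAP_FAILED)
		printf("mapped at %p\n", mem);	/* "if (mem)" would wrongly take this branch */
	else
		perror("mmap");			/* this is the branch that actually runs */
	return 0;
}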
diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
index 48dc151f8fca..eaa6d1fc5328 100644
--- a/tools/testing/selftests/mm/protection_keys.c
+++ b/tools/testing/selftests/mm/protection_keys.c
@@ -42,7 +42,7 @@
#include <sys/wait.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <unistd.h>
+#include <asm-generic/unistd.h>
#include <sys/ptrace.h>
#include <setjmp.h>
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index 3157204b9047..03ac4f2e1cce 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -265,6 +265,7 @@ CATEGORY="hugetlb" run_test ./map_hugetlb
CATEGORY="hugetlb" run_test ./hugepage-mremap
CATEGORY="hugetlb" run_test ./hugepage-vmemmap
CATEGORY="hugetlb" run_test ./hugetlb-madvise
+CATEGORY="hugetlb" run_test ./hugetlb_dio
nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
# For this test, we need one and just one huge page
@@ -331,6 +332,12 @@ CATEGORY="hugetlb" run_test ./thuge-gen
CATEGORY="hugetlb" run_test ./charge_reserved_hugetlb.sh -cgroup-v2
CATEGORY="hugetlb" run_test ./hugetlb_reparenting_test.sh -cgroup-v2
if $RUN_DESTRUCTIVE; then
+nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
+enable_soft_offline=$(cat /proc/sys/vm/enable_soft_offline)
+echo 8 > /proc/sys/vm/nr_hugepages
+CATEGORY="hugetlb" run_test ./hugetlb-soft-offline
+echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
+echo "$enable_soft_offline" > /proc/sys/vm/enable_soft_offline
CATEGORY="hugetlb" run_test ./hugetlb-read-hwpoison
fi
diff --git a/tools/testing/selftests/mm/seal_elf.c b/tools/testing/selftests/mm/seal_elf.c
index f2babec79bb6..7aa1366063e4 100644
--- a/tools/testing/selftests/mm/seal_elf.c
+++ b/tools/testing/selftests/mm/seal_elf.c
@@ -2,7 +2,7 @@
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdint.h>
-#include <unistd.h>
+#include <asm-generic/unistd.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
@@ -16,38 +16,7 @@
#include <sys/ioctl.h>
#include <sys/vfs.h>
#include <sys/stat.h>
-
-/*
- * need those definition for manually build using gcc.
- * gcc -I ../../../../usr/include -DDEBUG -O3 -DDEBUG -O3 seal_elf.c -o seal_elf
- */
-#define FAIL_TEST_IF_FALSE(c) do {\
- if (!(c)) {\
- ksft_test_result_fail("%s, line:%d\n", __func__, __LINE__);\
- goto test_end;\
- } \
- } \
- while (0)
-
-#define SKIP_TEST_IF_FALSE(c) do {\
- if (!(c)) {\
- ksft_test_result_skip("%s, line:%d\n", __func__, __LINE__);\
- goto test_end;\
- } \
- } \
- while (0)
-
-
-#define TEST_END_CHECK() {\
- ksft_test_result_pass("%s\n", __func__);\
- return;\
-test_end:\
- return;\
-}
-
-#ifndef u64
-#define u64 unsigned long long
-#endif
+#include "mseal_helpers.h"
/*
* define sys_xyx to call syscall directly.
@@ -158,7 +127,7 @@ static void test_seal_elf(void)
FAIL_TEST_IF_FALSE(ret < 0);
ksft_print_msg("somestr is sealed, mprotect is rejected\n");
- TEST_END_CHECK();
+ REPORT_TEST_PASS();
}
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index d3c7f5fb3e7b..e5e8dafc9d94 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -300,7 +300,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
char **addr)
{
size_t i;
- int __attribute__((unused)) dummy = 0;
+ int dummy = 0;
srand(time(NULL));
@@ -341,6 +341,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
for (size_t i = 0; i < fd_size; i++)
dummy += *(*addr + i);
+ asm volatile("" : "+r" (dummy));
if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");
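The added empty asm statement is a compiler barrier: the "+r" constraint marks dummy as both read and written, so the page-cache reads that feed it cannot be dead-code eliminated now that the __attribute__((unused)) annotation is gone. A hedged standalone illustration of the same trick:

/* Without the empty asm, an optimizing compiler may delete the whole loop,
 * since dummy is never otherwise used.
 */
void touch_buffer(const char *buf, unsigned long len)
{
	int dummy = 0;
	unsigned long i;

	for (i = 0; i < len; i++)
		dummy += buf[i];		/* reads we want to keep */

	asm volatile("" : "+r" (dummy));	/* force dummy (and the reads) to stay live */
}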
diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c
index ea7fd8fe2876..e4370b79b62f 100644
--- a/tools/testing/selftests/mm/thuge-gen.c
+++ b/tools/testing/selftests/mm/thuge-gen.c
@@ -13,8 +13,9 @@
sudo ipcs | awk '$1 == "0x00000000" {print $2}' | xargs -n1 sudo ipcrm -m
(warning this will remove all if someone else uses them) */
-#define _GNU_SOURCE 1
+#define _GNU_SOURCE
#include <sys/mman.h>
+#include <linux/mman.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/ipc.h>
@@ -28,19 +29,23 @@
#include "vm_util.h"
#include "../kselftest.h"
-#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT)
-#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)
-#define MAP_HUGE_SHIFT 26
-#define MAP_HUGE_MASK 0x3f
#if !defined(MAP_HUGETLB)
#define MAP_HUGETLB 0x40000
#endif
#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
+#ifndef SHM_HUGE_SHIFT
#define SHM_HUGE_SHIFT 26
+#endif
+#ifndef SHM_HUGE_MASK
#define SHM_HUGE_MASK 0x3f
+#endif
+#ifndef SHM_HUGE_2MB
#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT)
+#endif
+#ifndef SHM_HUGE_1GB
#define SHM_HUGE_1GB (30 << SHM_HUGE_SHIFT)
+#endif
#define NUM_PAGESIZES 5
#define NUM_PAGES 4
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index 7ad6ba660c7d..717539eddf98 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -673,11 +673,7 @@ int uffd_open_dev(unsigned int flags)
int uffd_open_sys(unsigned int flags)
{
-#ifdef __NR_userfaultfd
return syscall(__NR_userfaultfd, flags);
-#else
- return -1;
-#endif
}
int uffd_open(unsigned int flags)
diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
index f78bab0f3d45..a4b83280998a 100644
--- a/tools/testing/selftests/mm/uffd-stress.c
+++ b/tools/testing/selftests/mm/uffd-stress.c
@@ -33,10 +33,10 @@
* pthread_mutex_lock will also verify the atomicity of the memory
* transfer (UFFDIO_COPY).
*/
-
+#include <asm-generic/unistd.h>
#include "uffd-common.h"
-#ifdef __NR_userfaultfd
+uint64_t features;
#define BOUNCE_RANDOM (1<<0)
#define BOUNCE_RACINGFAULTS (1<<1)
@@ -247,10 +247,14 @@ static int userfaultfd_stress(void)
unsigned long nr;
struct uffd_args args[nr_cpus];
uint64_t mem_size = nr_pages * page_size;
+ int flags = 0;
memset(args, 0, sizeof(struct uffd_args) * nr_cpus);
- if (uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED, NULL))
+ if (features & UFFD_FEATURE_WP_UNPOPULATED && test_type == TEST_ANON)
+ flags = UFFD_FEATURE_WP_UNPOPULATED;
+
+ if (uffd_test_ctx_init(flags, NULL))
err("context init failed");
if (posix_memalign(&area, page_size, page_size))
@@ -385,8 +389,6 @@ static void set_test_type(const char *type)
static void parse_test_type_arg(const char *raw_type)
{
- uint64_t features = UFFD_API_FEATURES;
-
set_test_type(raw_type);
if (!test_type)
@@ -409,12 +411,15 @@ static void parse_test_type_arg(const char *raw_type)
* feature.
*/
- if (userfaultfd_open(&features))
- err("Userfaultfd open failed");
+ if (uffd_get_features(&features))
+ err("failed to get available features");
test_uffdio_wp = test_uffdio_wp &&
(features & UFFD_FEATURE_PAGEFAULT_FLAG_WP);
+ if (test_type != TEST_ANON && !(features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM))
+ test_uffdio_wp = false;
+
close(uffd);
uffd = -1;
}
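The changes above gate UFFD_FEATURE_WP_UNPOPULATED and the write-protect tests on the features the kernel actually reports. A hedged sketch of the underlying UFFDIO_API handshake that such a query boils down to (the selftest wraps this in its own uffd_get_features() helper; this sketch assumes kernel headers new enough to define UFFD_FEATURE_WP_UNPOPULATED):

#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api)) {
		perror("userfaultfd/UFFDIO_API");
		return 1;
	}

	/* api.features now holds the feature bits the running kernel supports. */
	printf("WP_UNPOPULATED supported: %s\n",
	       (api.features & UFFD_FEATURE_WP_UNPOPULATED) ? "yes" : "no");
	close(uffd);
	return 0;
}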
@@ -466,15 +471,3 @@ int main(int argc, char **argv)
nr_pages, nr_pages_per_cpu);
return userfaultfd_stress();
}
-
-#else /* __NR_userfaultfd */
-
-#warning "missing __NR_userfaultfd definition"
-
-int main(void)
-{
- printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
- return KSFT_SKIP;
-}
-
-#endif /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 21ec23206ab4..b3d21eed203d 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -5,12 +5,11 @@
* Copyright (C) 2015-2023 Red Hat, Inc.
*/
+#include <asm-generic/unistd.h>
#include "uffd-common.h"
#include "../../../../mm/gup_test.h"
-#ifdef __NR_userfaultfd
-
/* The unit test doesn't need a large or random size, make it 32MB for now */
#define UFFD_TEST_MEM_SIZE (32UL << 20)
@@ -1554,14 +1553,3 @@ int main(int argc, char *argv[])
return ksft_get_fail_cnt() ? KSFT_FAIL : KSFT_PASS;
}
-#else /* __NR_userfaultfd */
-
-#warning "missing __NR_userfaultfd definition"
-
-int main(void)
-{
- printf("Skipping %s (missing __NR_userfaultfd)\n", __file__);
- return KSFT_SKIP;
-}
-
-#endif /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.c b/tools/testing/selftests/mm/va_high_addr_switch.c
index cfbc501290d3..fa7eabfaf841 100644
--- a/tools/testing/selftests/mm/va_high_addr_switch.c
+++ b/tools/testing/selftests/mm/va_high_addr_switch.c
@@ -9,26 +9,9 @@
#include <sys/mman.h>
#include <string.h>
+#include "vm_util.h"
#include "../kselftest.h"
-#ifdef __powerpc64__
-#define PAGE_SIZE (64 << 10)
-/*
- * This will work with 16M and 2M hugepage size
- */
-#define HUGETLB_SIZE (16 << 20)
-#elif __aarch64__
-/*
- * The default hugepage size for 64k base pagesize
- * is 512MB.
- */
-#define PAGE_SIZE (64 << 10)
-#define HUGETLB_SIZE (512 << 20)
-#else
-#define PAGE_SIZE (4 << 10)
-#define HUGETLB_SIZE (2 << 20)
-#endif
-
/*
* The hint addr value is used to allocate addresses
* beyond the high address switch boundary.
@@ -37,18 +20,8 @@
#define ADDR_MARK_128TB (1UL << 47)
#define ADDR_MARK_256TB (1UL << 48)
-#define HIGH_ADDR_128TB ((void *) (1UL << 48))
-#define HIGH_ADDR_256TB ((void *) (1UL << 49))
-
-#define LOW_ADDR ((void *) (1UL << 30))
-
-#ifdef __aarch64__
-#define ADDR_SWITCH_HINT ADDR_MARK_256TB
-#define HIGH_ADDR HIGH_ADDR_256TB
-#else
-#define ADDR_SWITCH_HINT ADDR_MARK_128TB
-#define HIGH_ADDR HIGH_ADDR_128TB
-#endif
+#define HIGH_ADDR_128TB (1UL << 48)
+#define HIGH_ADDR_256TB (1UL << 49)
struct testcase {
void *addr;
@@ -59,195 +32,230 @@ struct testcase {
unsigned int keep_mapped:1;
};
-static struct testcase testcases[] = {
- {
- /*
- * If stack is moved, we could possibly allocate
- * this at the requested address.
- */
- .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
- .size = PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
- .low_addr_required = 1,
- },
- {
- /*
- * Unless MAP_FIXED is specified, allocation based on hint
- * addr is never at requested address or above it, which is
- * beyond high address switch boundary in this case. Instead,
- * a suitable allocation is found in lower address space.
- */
- .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, (2 * PAGE_SIZE))",
- .low_addr_required = 1,
- },
- {
- /*
- * Exact mapping at high address switch boundary, should
- * be obtained even without MAP_FIXED as area is free.
- */
- .addr = ((void *)(ADDR_SWITCH_HINT)),
- .size = PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
- .keep_mapped = 1,
- },
- {
- .addr = (void *)(ADDR_SWITCH_HINT),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
- },
- {
- .addr = NULL,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(NULL)",
- .low_addr_required = 1,
- },
- {
- .addr = LOW_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(LOW_ADDR)",
- .low_addr_required = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR)",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR) again",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
- },
- {
- .addr = (void *) -1,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1)",
- .keep_mapped = 1,
- },
- {
- .addr = (void *) -1,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1) again",
- },
- {
- .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
- .size = PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
- .low_addr_required = 1,
- },
- {
- .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2 * PAGE_SIZE)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE / 2),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE/2 , 2 * PAGE_SIZE)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = ((void *)(ADDR_SWITCH_HINT)),
- .size = PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
- },
- {
- .addr = (void *)(ADDR_SWITCH_HINT),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
- },
-};
+static struct testcase *testcases;
+static struct testcase *hugetlb_testcases;
+static int sz_testcases, sz_hugetlb_testcases;
+static unsigned long switch_hint;
-static struct testcase hugetlb_testcases[] = {
- {
- .addr = NULL,
- .size = HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(NULL, MAP_HUGETLB)",
- .low_addr_required = 1,
- },
- {
- .addr = LOW_ADDR,
- .size = HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
- .low_addr_required = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
- },
- {
- .addr = (void *) -1,
- .size = HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1, MAP_HUGETLB)",
- .keep_mapped = 1,
- },
- {
- .addr = (void *) -1,
- .size = HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1, MAP_HUGETLB) again",
- },
- {
- .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
- .size = 2 * HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2*HUGETLB_SIZE, MAP_HUGETLB)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = (void *)(ADDR_SWITCH_HINT),
- .size = 2 * HUGETLB_SIZE,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(ADDR_SWITCH_HINT , 2*HUGETLB_SIZE, MAP_FIXED | MAP_HUGETLB)",
- },
-};
+/* Initialize testcases inside a function to compute parameters at runtime */
+void testcases_init(void)
+{
+ unsigned long pagesize = getpagesize();
+ unsigned long hugepagesize = default_huge_page_size();
+ unsigned long low_addr = (1UL << 30);
+ unsigned long addr_switch_hint = ADDR_MARK_128TB;
+ unsigned long high_addr = HIGH_ADDR_128TB;
+
+#ifdef __aarch64__
+
+ /* Post LPA2, the lower userspace VA on a 16K pagesize is 47 bits. */
+ if (pagesize != (16UL << 10)) {
+ addr_switch_hint = ADDR_MARK_256TB;
+ high_addr = HIGH_ADDR_256TB;
+ }
+#endif
+
+ struct testcase t[] = {
+ {
+ /*
+ * If stack is moved, we could possibly allocate
+ * this at the requested address.
+ */
+ .addr = ((void *)(addr_switch_hint - pagesize)),
+ .size = pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(addr_switch_hint - pagesize, pagesize)",
+ .low_addr_required = 1,
+ },
+ {
+ /*
+ * Unless MAP_FIXED is specified, allocation based on hint
+ * addr is never at requested address or above it, which is
+ * beyond high address switch boundary in this case. Instead,
+ * a suitable allocation is found in lower address space.
+ */
+ .addr = ((void *)(addr_switch_hint - pagesize)),
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(addr_switch_hint - pagesize, (2 * pagesize))",
+ .low_addr_required = 1,
+ },
+ {
+ /*
+ * Exact mapping at high address switch boundary, should
+ * be obtained even without MAP_FIXED as area is free.
+ */
+ .addr = ((void *)(addr_switch_hint)),
+ .size = pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(addr_switch_hint, pagesize)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(addr_switch_hint),
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(addr_switch_hint, 2 * pagesize, MAP_FIXED)",
+ },
+ {
+ .addr = NULL,
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = (void *)low_addr,
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(low_addr)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = (void *)high_addr,
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(high_addr)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)high_addr,
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(high_addr) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)high_addr,
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(high_addr, MAP_FIXED)",
+ },
+ {
+ .addr = (void *) -1,
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *) -1,
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1) again",
+ },
+ {
+ .addr = ((void *)(addr_switch_hint - pagesize)),
+ .size = pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(addr_switch_hint - pagesize, pagesize)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = (void *)(addr_switch_hint - pagesize),
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(addr_switch_hint - pagesize, 2 * pagesize)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(addr_switch_hint - pagesize / 2),
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(addr_switch_hint - pagesize/2, 2 * pagesize)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = ((void *)(addr_switch_hint)),
+ .size = pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(addr_switch_hint, pagesize)",
+ },
+ {
+ .addr = (void *)(addr_switch_hint),
+ .size = 2 * pagesize,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(addr_switch_hint, 2 * pagesize, MAP_FIXED)",
+ },
+ };
+
+ struct testcase ht[] = {
+ {
+ .addr = NULL,
+ .size = hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = (void *)low_addr,
+ .size = hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(low_addr, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = (void *)high_addr,
+ .size = hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(high_addr, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)high_addr,
+ .size = hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(high_addr, MAP_HUGETLB) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)high_addr,
+ .size = hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(high_addr, MAP_FIXED | MAP_HUGETLB)",
+ },
+ {
+ .addr = (void *) -1,
+ .size = hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *) -1,
+ .size = hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB) again",
+ },
+ {
+ .addr = (void *)(addr_switch_hint - pagesize),
+ .size = 2 * hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(addr_switch_hint - pagesize, 2*hugepagesize, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(addr_switch_hint),
+ .size = 2 * hugepagesize,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(addr_switch_hint, 2*hugepagesize, MAP_FIXED | MAP_HUGETLB)",
+ },
+ };
+
+ testcases = malloc(sizeof(t));
+ hugetlb_testcases = malloc(sizeof(ht));
+
+ /* Copy into global arrays */
+ memcpy(testcases, t, sizeof(t));
+ memcpy(hugetlb_testcases, ht, sizeof(ht));
+
+ sz_testcases = ARRAY_SIZE(t);
+ sz_hugetlb_testcases = ARRAY_SIZE(ht);
+ switch_hint = addr_switch_hint;
+}
static int run_test(struct testcase *test, int count)
{
@@ -267,7 +275,7 @@ static int run_test(struct testcase *test, int count)
continue;
}
- if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) {
+ if (t->low_addr_required && p >= (void *)(switch_hint)) {
printf("FAILED\n");
ret = KSFT_FAIL;
} else {
@@ -292,7 +300,7 @@ static int supported_arch(void)
#elif defined(__x86_64__)
return 1;
#elif defined(__aarch64__)
- return getpagesize() == PAGE_SIZE;
+ return 1;
#else
return 0;
#endif
@@ -305,8 +313,10 @@ int main(int argc, char **argv)
if (!supported_arch())
return KSFT_SKIP;
- ret = run_test(testcases, ARRAY_SIZE(testcases));
+ testcases_init();
+
+ ret = run_test(testcases, sz_testcases);
if (argc == 2 && !strcmp(argv[1], "--run-hugetlb"))
- ret = run_test(hugetlb_testcases, ARRAY_SIZE(hugetlb_testcases));
+ ret = run_test(hugetlb_testcases, sz_hugetlb_testcases);
return ret;
}
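
The rewritten test derives its parameters in testcases_init() at run time: the page size, the default huge page size, and the 128TB/256TB address switch boundary (the aarch64 branch keeps the 128TB boundary only for 16K pages, whose lower userspace VA is 47 bits per the comment above). A rough shell equivalent of those run-time lookups, shown only for illustration and assuming ADDR_MARK_128TB and ADDR_MARK_256TB expand to 1UL << 47 and 1UL << 48 as the names suggest:

    # Values the test now computes at run time instead of hard-coding.
    pagesize=$(getconf PAGESIZE)
    hugepagesize=$(( $(awk '/^Hugepagesize:/ {print $2}' /proc/meminfo) * 1024 ))
    echo "page size: $pagesize bytes, default huge page size: $hugepagesize bytes"
    echo "128TB mark: $((1 << 47)), 256TB mark: $((1 << 48))"
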
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.sh b/tools/testing/selftests/mm/va_high_addr_switch.sh
index a0a75f302904..2c725773cd79 100755
--- a/tools/testing/selftests/mm/va_high_addr_switch.sh
+++ b/tools/testing/selftests/mm/va_high_addr_switch.sh
@@ -57,8 +57,4 @@ check_test_requirements()
}
check_test_requirements
-./va_high_addr_switch
-
-# In order to run hugetlb testcases, "--run-hugetlb" must be appended
-# to the binary.
./va_high_addr_switch --run-hugetlb
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index 5c16159d0bcd..fb898850867c 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -323,7 +323,8 @@ void *fake_cont_thread(void *arg)
void *cont_thread(void *arg)
{
char buff[MSG_SIZE];
- int i, priority;
+ int i;
+ unsigned int priority;
for (i = 0; i < num_cpus_to_pin; i++)
if (cpu_threads[i] == pthread_self())
@@ -425,7 +426,8 @@ struct test test2[] = {
void *perf_test_thread(void *arg)
{
char buff[MSG_SIZE];
- int prio_out, prio_in;
+ int prio_out;
+ unsigned int prio_in;
int i;
clockid_t clock;
pthread_t *t;
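
The mq_perf_tests.c hunks switch the received-priority variables from int to unsigned int, which matches the msg_prio pointer type that mq_receive(3) and mq_timedreceive(3) take and so avoids pointer-sign warnings when passing &priority. A quick way to confirm the declared type on a given system (the header path may vary by distribution):

    grep -n -A2 'mq_receive' /usr/include/mqueue.h
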
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index d9393569d03a..8eaffd7a641c 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for net selftests
-CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
+CFLAGS += -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/ $(KHDR_INCLUDES)
# Additional include paths needed by kselftest.h
CFLAGS += -I../
@@ -55,6 +55,7 @@ TEST_PROGS += bind_bhash.sh
TEST_PROGS += ip_local_port_range.sh
TEST_PROGS += rps_default_mask.sh
TEST_PROGS += big_tcp.sh
+TEST_PROGS += netns-sysctl.sh
TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
index 7e7ed6c558da..d458b45c775b 100755
--- a/tools/testing/selftests/net/amt.sh
+++ b/tools/testing/selftests/net/amt.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Author: Taehee Yoo <ap420073@gmail.com>
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index d4891f7a2bfa..5b9baf708950 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -26,7 +26,6 @@ CONFIG_INET_ESP=y
CONFIG_INET_ESP_OFFLOAD=y
CONFIG_NET_FOU=y
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_IP_GRE=m
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
@@ -75,7 +74,12 @@ CONFIG_NET_SCH_ETF=m
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_PRIO=m
CONFIG_NFT_COMPAT=m
+CONFIG_NF_CONNTRACK_OVS=y
CONFIG_NF_FLOW_TABLE=m
+CONFIG_OPENVSWITCH=m
+CONFIG_OPENVSWITCH_GENEVE=m
+CONFIG_OPENVSWITCH_GRE=m
+CONFIG_OPENVSWITCH_VXLAN=m
CONFIG_PSAMPLE=m
CONFIG_TCP_MD5SIG=y
CONFIG_TEST_BLACKHOLE_DEV=m
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 73895711cdf4..5f3c28fc8624 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -1737,53 +1737,53 @@ ipv4_rt_dsfield()
# DSCP 0x10 should match the specific route, no matter the ECN bits
$IP route get fibmatch 172.16.102.1 dsfield 0x10 | \
- grep -q "via 172.16.103.2"
+ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
log_test $? 0 "IPv4 route with DSCP and ECN:Not-ECT"
$IP route get fibmatch 172.16.102.1 dsfield 0x11 | \
- grep -q "via 172.16.103.2"
+ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
log_test $? 0 "IPv4 route with DSCP and ECN:ECT(1)"
$IP route get fibmatch 172.16.102.1 dsfield 0x12 | \
- grep -q "via 172.16.103.2"
+ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
log_test $? 0 "IPv4 route with DSCP and ECN:ECT(0)"
$IP route get fibmatch 172.16.102.1 dsfield 0x13 | \
- grep -q "via 172.16.103.2"
+ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
log_test $? 0 "IPv4 route with DSCP and ECN:CE"
# Unknown DSCP should match the generic route, no matter the ECN bits
$IP route get fibmatch 172.16.102.1 dsfield 0x14 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with unknown DSCP and ECN:Not-ECT"
$IP route get fibmatch 172.16.102.1 dsfield 0x15 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(1)"
$IP route get fibmatch 172.16.102.1 dsfield 0x16 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(0)"
$IP route get fibmatch 172.16.102.1 dsfield 0x17 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with unknown DSCP and ECN:CE"
# Null DSCP should match the generic route, no matter the ECN bits
$IP route get fibmatch 172.16.102.1 dsfield 0x00 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with no DSCP and ECN:Not-ECT"
$IP route get fibmatch 172.16.102.1 dsfield 0x01 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(1)"
$IP route get fibmatch 172.16.102.1 dsfield 0x02 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(0)"
$IP route get fibmatch 172.16.102.1 dsfield 0x03 | \
- grep -q "via 172.16.101.2"
+ grep -q "172.16.102.0/24 via 172.16.101.2"
log_test $? 0 "IPv4 route with no DSCP and ECN:CE"
}
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index fa7b59ff4029..224346426ef2 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -39,6 +39,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
ipip_hier_gre.sh \
lib_sh_test.sh \
local_termination.sh \
+ min_max_mtu.sh \
mirror_gre_bound.sh \
mirror_gre_bridge_1d.sh \
mirror_gre_bridge_1d_vlan.sh \
@@ -70,6 +71,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
router_broadcast.sh \
router_mpath_nh_res.sh \
router_mpath_nh.sh \
+ router_mpath_seed.sh \
router_multicast.sh \
router_multipath.sh \
router_nh.sh \
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index f1de525cfa55..62a05bca1e82 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -122,6 +122,8 @@ devlink_reload()
still_pending=$(devlink resource show "$DEVLINK_DEV" | \
grep -c "size_new")
check_err $still_pending "Failed reload - There are still unset sizes"
+
+ udevadm settle
}
declare -A DEVLINK_ORIG
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index eabbdf00d8ca..ff96bb7535ff 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -1134,12 +1134,19 @@ bridge_ageing_time_get()
}
declare -A SYSCTL_ORIG
+sysctl_save()
+{
+ local key=$1; shift
+
+ SYSCTL_ORIG[$key]=$(sysctl -n $key)
+}
+
sysctl_set()
{
local key=$1; shift
local value=$1; shift
- SYSCTL_ORIG[$key]=$(sysctl -n $key)
+ sysctl_save "$key"
sysctl -qw $key="$value"
}
@@ -1218,22 +1225,6 @@ trap_uninstall()
tc filter del dev $dev $direction pref 1 flower
}
-slow_path_trap_install()
-{
- # For slow-path testing, we need to install a trap to get to
- # slow path the packets that would otherwise be switched in HW.
- if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
- trap_install "$@"
- fi
-}
-
-slow_path_trap_uninstall()
-{
- if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
- trap_uninstall "$@"
- fi
-}
-
__icmp_capture_add_del()
{
local add_del=$1; shift
@@ -1250,22 +1241,34 @@ __icmp_capture_add_del()
icmp_capture_install()
{
- __icmp_capture_add_del add 100 "" "$@"
+ local tundev=$1; shift
+ local filter=$1; shift
+
+ __icmp_capture_add_del add 100 "" "$tundev" "$filter"
}
icmp_capture_uninstall()
{
- __icmp_capture_add_del del 100 "" "$@"
+ local tundev=$1; shift
+ local filter=$1; shift
+
+ __icmp_capture_add_del del 100 "" "$tundev" "$filter"
}
icmp6_capture_install()
{
- __icmp_capture_add_del add 100 v6 "$@"
+ local tundev=$1; shift
+ local filter=$1; shift
+
+ __icmp_capture_add_del add 100 v6 "$tundev" "$filter"
}
icmp6_capture_uninstall()
{
- __icmp_capture_add_del del 100 v6 "$@"
+ local tundev=$1; shift
+ local filter=$1; shift
+
+ __icmp_capture_add_del del 100 v6 "$tundev" "$filter"
}
__vlan_capture_add_del()
@@ -1283,12 +1286,18 @@ __vlan_capture_add_del()
vlan_capture_install()
{
- __vlan_capture_add_del add 100 "$@"
+ local dev=$1; shift
+ local filter=$1; shift
+
+ __vlan_capture_add_del add 100 "$dev" "$filter"
}
vlan_capture_uninstall()
{
- __vlan_capture_add_del del 100 "$@"
+ local dev=$1; shift
+ local filter=$1; shift
+
+ __vlan_capture_add_del del 100 "$dev" "$filter"
}
__dscp_capture_add_del()
@@ -1648,34 +1657,61 @@ __start_traffic()
local sip=$1; shift
local dip=$1; shift
local dmac=$1; shift
+ local -a mz_args=("$@")
$MZ $h_in -p $pktsize -A $sip -B $dip -c 0 \
- -a own -b $dmac -t "$proto" -q "$@" &
+ -a own -b $dmac -t "$proto" -q "${mz_args[@]}" &
sleep 1
}
start_traffic_pktsize()
{
local pktsize=$1; shift
+ local h_in=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+ local -a mz_args=("$@")
- __start_traffic $pktsize udp "$@"
+ __start_traffic $pktsize udp "$h_in" "$sip" "$dip" "$dmac" \
+ "${mz_args[@]}"
}
start_tcp_traffic_pktsize()
{
local pktsize=$1; shift
+ local h_in=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+ local -a mz_args=("$@")
- __start_traffic $pktsize tcp "$@"
+ __start_traffic $pktsize tcp "$h_in" "$sip" "$dip" "$dmac" \
+ "${mz_args[@]}"
}
start_traffic()
{
- start_traffic_pktsize 8000 "$@"
+ local h_in=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+ local -a mz_args=("$@")
+
+ start_traffic_pktsize 8000 "$h_in" "$sip" "$dip" "$dmac" \
+ "${mz_args[@]}"
}
start_tcp_traffic()
{
- start_tcp_traffic_pktsize 8000 "$@"
+ local h_in=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+ local -a mz_args=("$@")
+
+ start_tcp_traffic_pktsize 8000 "$h_in" "$sip" "$dip" "$dmac" \
+ "${mz_args[@]}"
}
stop_traffic()
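
The lib.sh traffic helpers now name their four positional parameters explicitly and forward anything after them to mausezahn through the mz_args array instead of an opaque "$@". A hedged usage sketch (interface variables and the extra -d delay option are placeholders for illustration, not part of the patch):

    # Arguments after the destination MAC are handed to mausezahn unchanged.
    start_traffic $h1 192.0.2.1 192.0.2.2 $(mac_get $h2) -d 20msec
    sleep 10
    stop_traffic    # terminates the background mausezahn started above
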
diff --git a/tools/testing/selftests/net/forwarding/min_max_mtu.sh b/tools/testing/selftests/net/forwarding/min_max_mtu.sh
new file mode 100755
index 000000000000..97bb8b221bed
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/min_max_mtu.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +--------------------+
+# | H1 |
+# | |
+# | $h1.10 + |
+# | 192.0.2.2/24 | |
+# | 2001:db8:1::2/64 | |
+# | | |
+# | $h1 + |
+# | | |
+# +------------------|-+
+# |
+# +------------------|-+
+# | SW | |
+# | $swp1 + |
+# | | |
+# | $swp1.10 + |
+# | 192.0.2.1/24 |
+# | 2001:db8:1::1/64 |
+# | |
+# +--------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ max_mtu_config_test
+ max_mtu_traffic_test
+ min_mtu_config_test
+ min_mtu_traffic_test
+"
+
+NUM_NETIFS=2
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ vlan_create $h1 10 v$h1 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 10 192.0.2.2/24 2001:db8:1::2/64
+ simple_if_fini $h1
+}
+
+switch_create()
+{
+ ip li set dev $swp1 up
+ vlan_create $swp1 10 "" 192.0.2.1/24 2001:db8:1::1/64
+}
+
+switch_destroy()
+{
+ ip li set dev $swp1 down
+ vlan_destroy $swp1 10
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ vrf_prepare
+
+ h1_create
+
+ switch_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ switch_destroy
+
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1.10 192.0.2.1
+}
+
+ping_ipv6()
+{
+ ping6_test $h1.10 2001:db8:1::1
+}
+
+min_max_mtu_get_if()
+{
+ local dev=$1; shift
+ local min_max=$1; shift
+
+ ip -d -j link show $dev | jq ".[].$min_max"
+}
+
+ensure_compatible_min_max_mtu()
+{
+ local min_max=$1; shift
+
+ local mtu=$(min_max_mtu_get_if ${NETIFS[p1]} $min_max)
+ local i
+
+ for ((i = 2; i <= NUM_NETIFS; ++i)); do
+ local current_mtu=$(min_max_mtu_get_if ${NETIFS[p$i]} $min_max)
+
+ if [ $current_mtu -ne $mtu ]; then
+ return 1
+ fi
+ done
+}
+
+mtu_set_if()
+{
+ local dev=$1; shift
+ local mtu=$1; shift
+ local should_fail=${1:-0}; shift
+
+ mtu_set $dev $mtu 2>/dev/null
+ check_err_fail $should_fail $? "Set MTU $mtu for $dev"
+}
+
+mtu_set_all_if()
+{
+ local mtu=$1; shift
+ local i
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ mtu_set_if ${NETIFS[p$i]} $mtu
+ mtu_set_if ${NETIFS[p$i]}.10 $mtu
+ done
+}
+
+mtu_restore_all_if()
+{
+ local i
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ mtu_restore ${NETIFS[p$i]}.10
+ mtu_restore ${NETIFS[p$i]}
+ done
+}
+
+mtu_test_ping4()
+{
+ local mtu=$1; shift
+ local should_fail=$1; shift
+
+ # Ping adds 8 bytes for ICMP header and 20 bytes for IP header
+ local ping_headers_len=$((20 + 8))
+ local pkt_size=$((mtu - ping_headers_len))
+
+ ping_do $h1.10 192.0.2.1 "-s $pkt_size -M do"
+ check_err_fail $should_fail $? "Ping, packet size: $pkt_size"
+}
+
+mtu_test_ping6()
+{
+ local mtu=$1; shift
+ local should_fail=$1; shift
+
+ # Ping adds 8 bytes for ICMP header and 40 bytes for IPv6 header
+ local ping6_headers_len=$((40 + 8))
+ local pkt_size=$((mtu - ping6_headers_len))
+
+ ping6_do $h1.10 2001:db8:1::1 "-s $pkt_size -M do"
+ check_err_fail $should_fail $? "Ping6, packet size: $pkt_size"
+}
+
+max_mtu_config_test()
+{
+ local i
+
+ RET=0
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ local dev=${NETIFS[p$i]}
+ local max_mtu=$(min_max_mtu_get_if $dev "max_mtu")
+ local should_fail
+
+ should_fail=0
+ mtu_set_if $dev $max_mtu $should_fail
+ mtu_restore $dev
+
+ should_fail=1
+ mtu_set_if $dev $((max_mtu + 1)) $should_fail
+ mtu_restore $dev
+ done
+
+ log_test "Test maximum MTU configuration"
+}
+
+max_mtu_traffic_test()
+{
+ local should_fail
+ local max_mtu
+
+ RET=0
+
+ if ! ensure_compatible_min_max_mtu "max_mtu"; then
+ log_test_xfail "Topology has incompatible maximum MTU values"
+ return
+ fi
+
+ max_mtu=$(min_max_mtu_get_if ${NETIFS[p1]} "max_mtu")
+
+ should_fail=0
+ mtu_set_all_if $max_mtu
+ mtu_test_ping4 $max_mtu $should_fail
+ mtu_test_ping6 $max_mtu $should_fail
+ mtu_restore_all_if
+
+ should_fail=1
+ mtu_set_all_if $((max_mtu - 1))
+ mtu_test_ping4 $max_mtu $should_fail
+ mtu_test_ping6 $max_mtu $should_fail
+ mtu_restore_all_if
+
+ log_test "Test traffic, packet size is maximum MTU"
+}
+
+min_mtu_config_test()
+{
+ local i
+
+ RET=0
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ local dev=${NETIFS[p$i]}
+ local min_mtu=$(min_max_mtu_get_if $dev "min_mtu")
+ local should_fail
+
+ should_fail=0
+ mtu_set_if $dev $min_mtu $should_fail
+ mtu_restore $dev
+
+ should_fail=1
+ mtu_set_if $dev $((min_mtu - 1)) $should_fail
+ mtu_restore $dev
+ done
+
+ log_test "Test minimum MTU configuration"
+}
+
+min_mtu_traffic_test()
+{
+ local should_fail=0
+ local min_mtu
+
+ RET=0
+
+ if ! ensure_compatible_min_max_mtu "min_mtu"; then
+ log_test_xfail "Topology has incompatible minimum MTU values"
+ return
+ fi

+
+ min_mtu=$(min_max_mtu_get_if ${NETIFS[p1]} "min_mtu")
+ mtu_set_all_if $min_mtu
+ mtu_test_ping4 $min_mtu $should_fail
+ # Do not test minimum MTU with IPv6, as IPv6 requires a higher minimum MTU (1280 bytes).
+
+ mtu_restore_all_if
+
+ log_test "Test traffic, packet size is minimum MTU"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
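
To make the header arithmetic in mtu_test_ping4() and mtu_test_ping6() above concrete, for a 1500-byte MTU the ping payload sizes work out as follows (plain shell arithmetic, for illustration only):

    mtu=1500
    echo $(( mtu - 20 - 8 ))   # 1472: IPv4 header (20) plus ICMP header (8)
    echo $(( mtu - 40 - 8 ))   # 1452: IPv6 header (40) plus ICMPv6 header (8)
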
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh
index 0266443601bc..921c733ee04f 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh
@@ -74,7 +74,7 @@ test_span_gre_mac()
RET=0
- mirror_install $swp1 $direction $tundev "matchall $tcflags"
+ mirror_install $swp1 $direction $tundev "matchall"
icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac"
mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10
@@ -82,29 +82,29 @@ test_span_gre_mac()
icmp_capture_uninstall h3-${tundev}
mirror_uninstall $swp1 $direction
- log_test "$direction $what: envelope MAC ($tcflags)"
+ log_test "$direction $what: envelope MAC"
}
test_two_spans()
{
RET=0
- mirror_install $swp1 ingress gt4 "matchall $tcflags"
- mirror_install $swp1 egress gt6 "matchall $tcflags"
- quick_test_span_gre_dir gt4 ingress
- quick_test_span_gre_dir gt6 egress
+ mirror_install $swp1 ingress gt4 "matchall"
+ mirror_install $swp1 egress gt6 "matchall"
+ quick_test_span_gre_dir gt4 8 0
+ quick_test_span_gre_dir gt6 0 8
mirror_uninstall $swp1 ingress
- fail_test_span_gre_dir gt4 ingress
- quick_test_span_gre_dir gt6 egress
+ fail_test_span_gre_dir gt4 8 0
+ quick_test_span_gre_dir gt6 0 8
- mirror_install $swp1 ingress gt4 "matchall $tcflags"
+ mirror_install $swp1 ingress gt4 "matchall"
mirror_uninstall $swp1 egress
- quick_test_span_gre_dir gt4 ingress
- fail_test_span_gre_dir gt6 egress
+ quick_test_span_gre_dir gt4 8 0
+ fail_test_span_gre_dir gt6 0 8
mirror_uninstall $swp1 ingress
- log_test "two simultaneously configured mirrors ($tcflags)"
+ log_test "two simultaneously configured mirrors"
}
test_gretap()
@@ -131,30 +131,11 @@ test_ip6gretap_mac()
test_span_gre_mac gt6 egress "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
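
This and the following mirror_gre_* scripts drop the test_all() wrapper that ran every test twice, once with tcflags=skip_hw and once with tcflags=skip_sw. For reference, those are the tc-flower flags that pin a filter to the software or the hardware datapath; an illustrative pair of rules (device name is a placeholder):

    tc filter add dev "$swp1" ingress protocol ip pref 10 \
            flower skip_hw ip_proto icmp action pass   # match in software only
    tc filter add dev "$swp1" ingress protocol ip pref 11 \
            flower skip_sw ip_proto icmp action pass   # match in hardware only
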
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
index 6c257ec03756..e3cd48e18eeb 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
@@ -196,32 +196,11 @@ test_ip6gretap()
full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap w/ UL"
}
-test_all()
-{
- RET=0
-
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
index 04fd14b0a9b7..6c7bd33332c2 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
@@ -108,30 +108,11 @@ test_ip6gretap()
full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
index f35313c76fac..909ec956a5e5 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
@@ -104,30 +104,11 @@ test_ip6gretap_stp()
full_test_span_gre_stp gt6 $swp3.555 "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
index 0cf4c47a46f9..40ac9dd3aff1 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
@@ -104,30 +104,11 @@ test_ip6gretap()
full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
}
-tests()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-tests
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- tests
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
index c53148b1dc63..fe4d7c906a70 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -227,10 +227,10 @@ test_lag_slave()
RET=0
tc filter add dev $swp1 ingress pref 999 \
- proto 802.1q flower vlan_ethtype arp $tcflags \
+ proto 802.1q flower vlan_ethtype arp \
action pass
mirror_install $swp1 ingress gt4 \
- "proto 802.1q flower vlan_id 333 $tcflags"
+ "proto 802.1q flower vlan_id 333"
# Test connectivity through $up_dev when $down_dev is set down.
ip link set dev $down_dev down
@@ -239,7 +239,7 @@ test_lag_slave()
setup_wait_dev $host_dev
$ARPING -I br1 192.0.2.130 -qfc 1
sleep 2
- mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 10
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 ">= 10"
# Test lack of connectivity when both slaves are down.
ip link set dev $up_dev down
@@ -252,7 +252,7 @@ test_lag_slave()
mirror_uninstall $swp1 ingress
tc filter del dev $swp1 ingress pref 999
- log_test "$what ($tcflags)"
+ log_test "$what"
}
test_mirror_gretap_first()
@@ -265,30 +265,11 @@ test_mirror_gretap_second()
test_lag_slave $h4 $swp4 $swp3 "mirror to gretap: LAG second slave"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index 5ea9d63915f7..65ae9d960c18 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -73,7 +73,7 @@ test_span_gre_ttl()
RET=0
mirror_install $swp1 ingress $tundev \
- "prot ip flower $tcflags ip_prot icmp"
+ "prot ip flower ip_prot icmp"
tc filter add dev $h3 ingress pref 77 prot $prot \
flower skip_hw ip_ttl 50 action pass
@@ -81,13 +81,13 @@ test_span_gre_ttl()
ip link set dev $tundev type $type ttl 50
sleep 2
- mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
+ mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 ">= 10"
ip link set dev $tundev type $type ttl 100
tc filter del dev $h3 ingress pref 77
mirror_uninstall $swp1 ingress
- log_test "$what: TTL change ($tcflags)"
+ log_test "$what: TTL change"
}
test_span_gre_tun_up()
@@ -98,15 +98,15 @@ test_span_gre_tun_up()
RET=0
ip link set dev $tundev down
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
ip link set dev $tundev up
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: tunnel down/up ($tcflags)"
+ log_test "$what: tunnel down/up"
}
test_span_gre_egress_up()
@@ -118,8 +118,8 @@ test_span_gre_egress_up()
RET=0
ip link set dev $swp3 down
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
# After setting the device up, wait for neighbor to get resolved so that
# we can expect mirroring to work.
@@ -127,10 +127,10 @@ test_span_gre_egress_up()
setup_wait_dev $swp3
ping -c 1 -I $swp3 $remote_ip &>/dev/null
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: egress down/up ($tcflags)"
+ log_test "$what: egress down/up"
}
test_span_gre_remote_ip()
@@ -144,14 +144,14 @@ test_span_gre_remote_ip()
RET=0
ip link set dev $tundev type $type remote $wrong_ip
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ fail_test_span_gre_dir $tundev
ip link set dev $tundev type $type remote $correct_ip
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: remote address change ($tcflags)"
+ log_test "$what: remote address change"
}
test_span_gre_tun_del()
@@ -165,10 +165,10 @@ test_span_gre_tun_del()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
ip link del dev $tundev
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
tunnel_create $tundev $type $local_ip $remote_ip \
ttl 100 tos inherit $flags
@@ -176,11 +176,11 @@ test_span_gre_tun_del()
# Recreating the tunnel doesn't reestablish mirroring, so reinstall it
# and verify it works for the follow-up tests.
mirror_uninstall $swp1 ingress
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: tunnel deleted ($tcflags)"
+ log_test "$what: tunnel deleted"
}
test_span_gre_route_del()
@@ -192,18 +192,18 @@ test_span_gre_route_del()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
ip route del $route dev $edev
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
ip route add $route dev $edev
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: underlay route removal ($tcflags)"
+ log_test "$what: underlay route removal"
}
test_ttl()
@@ -244,30 +244,11 @@ test_route_del()
test_span_gre_route_del gt6 $swp3 2001:db8:2::/64 "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
index 09389f3b9369..3a84f3ab5856 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
@@ -64,12 +64,19 @@ cleanup()
test_span_gre_dir_acl()
{
- test_span_gre_dir_ips "$@" 192.0.2.3 192.0.2.4
+ local tundev=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+
+ test_span_gre_dir_ips "$tundev" "$forward_type" \
+ "$backward_type" 192.0.2.3 192.0.2.4
}
fail_test_span_gre_dir_acl()
{
- fail_test_span_gre_dir_ips "$@" 192.0.2.3 192.0.2.4
+ local tundev=$1; shift
+
+ fail_test_span_gre_dir_ips "$tundev" 192.0.2.3 192.0.2.4
}
full_test_span_gre_dir_acl()
@@ -84,16 +91,15 @@ full_test_span_gre_dir_acl()
RET=0
mirror_install $swp1 $direction $tundev \
- "protocol ip flower $tcflags dst_ip $match_dip"
- fail_test_span_gre_dir $tundev $direction
- test_span_gre_dir_acl "$tundev" "$direction" \
- "$forward_type" "$backward_type"
+ "protocol ip flower dst_ip $match_dip"
+ fail_test_span_gre_dir $tundev
+ test_span_gre_dir_acl "$tundev" "$forward_type" "$backward_type"
mirror_uninstall $swp1 $direction
# Test lack of mirroring after ACL mirror is uninstalled.
- fail_test_span_gre_dir_acl "$tundev" "$direction"
+ fail_test_span_gre_dir_acl "$tundev"
- log_test "$direction $what ($tcflags)"
+ log_test "$direction $what"
}
test_gretap()
@@ -108,30 +114,11 @@ test_ip6gretap()
full_test_span_gre_dir_acl gt6 egress 0 8 192.0.2.3 "ACL mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
index 9edf4cb104a8..1261e6f46e34 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
@@ -37,8 +37,14 @@
# | \ / |
# | \____________________________________________/ |
# | | |
-# | + lag2 (team) |
-# | 192.0.2.130/28 |
+# | + lag2 (team) ------> + gt4-dst (gretap) |
+# | 192.0.2.130/28 loc=192.0.2.130 |
+# | rem=192.0.2.129 |
+# | ttl=100 |
+# | tos=inherit |
+# | |
+# | |
+# | |
# | |
# +---------------------------------------------------------------------------+
@@ -50,9 +56,6 @@ ALL_TESTS="
NUM_NETIFS=6
source lib.sh
source mirror_lib.sh
-source mirror_gre_lib.sh
-
-require_command $ARPING
vlan_host_create()
{
@@ -122,16 +125,21 @@ h3_create()
{
vrf_create vrf-h3
ip link set dev vrf-h3 up
- tc qdisc add dev $h3 clsact
- tc qdisc add dev $h4 clsact
h3_create_team
+
+ tunnel_create gt4-dst gretap 192.0.2.130 192.0.2.129 \
+ ttl 100 tos inherit
+ ip link set dev gt4-dst master vrf-h3
+ tc qdisc add dev gt4-dst clsact
}
h3_destroy()
{
+ tc qdisc del dev gt4-dst clsact
+ ip link set dev gt4-dst nomaster
+ tunnel_destroy gt4-dst
+
h3_destroy_team
- tc qdisc del dev $h4 clsact
- tc qdisc del dev $h3 clsact
ip link set dev vrf-h3 down
vrf_destroy vrf-h3
}
@@ -188,18 +196,12 @@ setup_prepare()
h2_create
h3_create
switch_create
-
- trap_install $h3 ingress
- trap_install $h4 ingress
}
cleanup()
{
pre_cleanup
- trap_uninstall $h4 ingress
- trap_uninstall $h3 ingress
-
switch_destroy
h3_destroy
h2_destroy
@@ -218,7 +220,8 @@ test_lag_slave()
RET=0
mirror_install $swp1 ingress gt4 \
- "proto 802.1q flower vlan_id 333 $tcflags"
+ "proto 802.1q flower vlan_id 333"
+ vlan_capture_install gt4-dst "vlan_ethtype ipv4 ip_proto icmp type 8"
# Move $down_dev away from the team. That will prompt change in
# txability of the connected device, without changing its upness. The
@@ -226,13 +229,14 @@ test_lag_slave()
# other slave.
ip link set dev $down_dev nomaster
sleep 2
- mirror_test vrf-h1 192.0.2.1 192.0.2.18 $up_dev 1 10
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 gt4-dst 100 10
# Test lack of connectivity when neither slave is txable.
ip link set dev $up_dev nomaster
sleep 2
- mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0
- mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 gt4-dst 100 0
+
+ vlan_capture_uninstall gt4-dst
mirror_uninstall $swp1 ingress
# Recreate H3's team device, because mlxsw, which this test is
@@ -243,7 +247,7 @@ test_lag_slave()
# Wait for ${h,swp}{3,4}.
setup_wait
- log_test "$what ($tcflags)"
+ log_test "$what"
}
test_mirror_gretap_first()
@@ -256,30 +260,11 @@ test_mirror_gretap_second()
test_lag_slave $h4 $h3 "mirror to gretap: LAG second slave"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
index 0c36546e131e..20078cc55f24 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
@@ -5,22 +5,34 @@ source "$net_forwarding_dir/mirror_lib.sh"
quick_test_span_gre_dir_ips()
{
local tundev=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
- do_test_span_dir_ips 10 h3-$tundev "$@"
+ do_test_span_dir_ips 10 h3-$tundev "$ip1" "$ip2" \
+ "$forward_type" "$backward_type"
}
fail_test_span_gre_dir_ips()
{
local tundev=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
- do_test_span_dir_ips 0 h3-$tundev "$@"
+ do_test_span_dir_ips 0 h3-$tundev "$ip1" "$ip2"
}
test_span_gre_dir_ips()
{
local tundev=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
- test_span_dir_ips h3-$tundev "$@"
+ test_span_dir_ips h3-$tundev "$forward_type" \
+ "$backward_type" "$ip1" "$ip2"
}
full_test_span_gre_dir_ips()
@@ -35,12 +47,12 @@ full_test_span_gre_dir_ips()
RET=0
- mirror_install $swp1 $direction $tundev "matchall $tcflags"
- test_span_dir_ips "h3-$tundev" "$direction" "$forward_type" \
+ mirror_install $swp1 $direction $tundev "matchall"
+ test_span_dir_ips "h3-$tundev" "$forward_type" \
"$backward_type" "$ip1" "$ip2"
mirror_uninstall $swp1 $direction
- log_test "$direction $what ($tcflags)"
+ log_test "$direction $what"
}
full_test_span_gre_dir_vlan_ips()
@@ -56,45 +68,63 @@ full_test_span_gre_dir_vlan_ips()
RET=0
- mirror_install $swp1 $direction $tundev "matchall $tcflags"
+ mirror_install $swp1 $direction $tundev "matchall"
- test_span_dir_ips "h3-$tundev" "$direction" "$forward_type" \
+ test_span_dir_ips "h3-$tundev" "$forward_type" \
"$backward_type" "$ip1" "$ip2"
tc filter add dev $h3 ingress pref 77 prot 802.1q \
flower $vlan_match \
action pass
- mirror_test v$h1 $ip1 $ip2 $h3 77 10
+ mirror_test v$h1 $ip1 $ip2 $h3 77 '>= 10'
tc filter del dev $h3 ingress pref 77
mirror_uninstall $swp1 $direction
- log_test "$direction $what ($tcflags)"
+ log_test "$direction $what"
}
quick_test_span_gre_dir()
{
- quick_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local tundev=$1; shift
+ local forward_type=${1-8}; shift
+ local backward_type=${1-0}; shift
+
+ quick_test_span_gre_dir_ips "$tundev" 192.0.2.1 192.0.2.2 \
+ "$forward_type" "$backward_type"
}
fail_test_span_gre_dir()
{
- fail_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
-}
+ local tundev=$1; shift
-test_span_gre_dir()
-{
- test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
+ fail_test_span_gre_dir_ips "$tundev" 192.0.2.1 192.0.2.2
}
full_test_span_gre_dir()
{
- full_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local tundev=$1; shift
+ local direction=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+ local what=$1; shift
+
+ full_test_span_gre_dir_ips "$tundev" "$direction" "$forward_type" \
+ "$backward_type" "$what" 192.0.2.1 192.0.2.2
}
full_test_span_gre_dir_vlan()
{
- full_test_span_gre_dir_vlan_ips "$@" 192.0.2.1 192.0.2.2
+ local tundev=$1; shift
+ local direction=$1; shift
+ local vlan_match=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+ local what=$1; shift
+
+ full_test_span_gre_dir_vlan_ips "$tundev" "$direction" "$vlan_match" \
+ "$forward_type" "$backward_type" \
+ "$what" 192.0.2.1 192.0.2.2
}
full_test_span_gre_stp_ips()
@@ -104,27 +134,39 @@ full_test_span_gre_stp_ips()
local what=$1; shift
local ip1=$1; shift
local ip2=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
local h3mac=$(mac_get $h3)
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir_ips $tundev ingress $ip1 $ip2
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir_ips $tundev $ip1 $ip2 \
+ "$forward_type" "$backward_type"
bridge link set dev $nbpdev state disabled
sleep 1
- fail_test_span_gre_dir_ips $tundev ingress $ip1 $ip2
+ fail_test_span_gre_dir_ips $tundev $ip1 $ip2
bridge link set dev $nbpdev state forwarding
sleep 1
- quick_test_span_gre_dir_ips $tundev ingress $ip1 $ip2
+ quick_test_span_gre_dir_ips $tundev $ip1 $ip2 \
+ "$forward_type" "$backward_type"
mirror_uninstall $swp1 ingress
- log_test "$what: STP state ($tcflags)"
+ log_test "$what: STP state"
}
full_test_span_gre_stp()
{
- full_test_span_gre_stp_ips "$@" 192.0.2.1 192.0.2.2
+ local tundev=$1; shift
+ local nbpdev=$1; shift
+ local what=$1; shift
+ local forward_type=${1-8}; shift
+ local backward_type=${1-0}; shift
+
+ full_test_span_gre_stp_ips "$tundev" "$nbpdev" "$what" \
+ 192.0.2.1 192.0.2.2 \
+ "$forward_type" "$backward_type"
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh
index fc0508e40fca..2cbfbecf25c8 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh
@@ -60,41 +60,32 @@ test_span_gre_neigh()
local addr=$1; shift
local tundev=$1; shift
local direction=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
local what=$1; shift
RET=0
ip neigh replace dev $swp3 $addr lladdr 00:11:22:33:44:55
- mirror_install $swp1 $direction $tundev "matchall $tcflags"
- fail_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 $direction $tundev "matchall"
+ fail_test_span_gre_dir $tundev "$forward_type" "$backward_type"
ip neigh del dev $swp3 $addr
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev "$forward_type" "$backward_type"
mirror_uninstall $swp1 $direction
- log_test "$direction $what: neighbor change ($tcflags)"
+ log_test "$direction $what: neighbor change"
}
test_gretap()
{
- test_span_gre_neigh 192.0.2.130 gt4 ingress "mirror to gretap"
- test_span_gre_neigh 192.0.2.130 gt4 egress "mirror to gretap"
+ test_span_gre_neigh 192.0.2.130 gt4 ingress 8 0 "mirror to gretap"
+ test_span_gre_neigh 192.0.2.130 gt4 egress 0 8 "mirror to gretap"
}
test_ip6gretap()
{
- test_span_gre_neigh 2001:db8:2::2 gt6 ingress "mirror to ip6gretap"
- test_span_gre_neigh 2001:db8:2::2 gt6 egress "mirror to ip6gretap"
-}
-
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
+ test_span_gre_neigh 2001:db8:2::2 gt6 ingress 8 0 "mirror to ip6gretap"
+ test_span_gre_neigh 2001:db8:2::2 gt6 egress 0 8 "mirror to ip6gretap"
}
trap cleanup EXIT
@@ -102,14 +93,6 @@ trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
index 6f9ef1820e93..34bc646938e3 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
@@ -75,42 +75,31 @@ cleanup()
test_gretap()
{
RET=0
- mirror_install $swp1 ingress gt4 "matchall $tcflags"
+ mirror_install $swp1 ingress gt4 "matchall"
# For IPv4, test that there's no mirroring without the route directing
# the traffic to tunnel remote address. Then add it and test that
# mirroring starts. For IPv6 we can't test this due to the limitation
# that routes for locally-specified IPv6 addresses can't be added.
- fail_test_span_gre_dir gt4 ingress
+ fail_test_span_gre_dir gt4
ip route add 192.0.2.130/32 via 192.0.2.162
- quick_test_span_gre_dir gt4 ingress
+ quick_test_span_gre_dir gt4
ip route del 192.0.2.130/32 via 192.0.2.162
mirror_uninstall $swp1 ingress
- log_test "mirror to gre with next-hop remote ($tcflags)"
+ log_test "mirror to gre with next-hop remote"
}
test_ip6gretap()
{
RET=0
- mirror_install $swp1 ingress gt6 "matchall $tcflags"
- quick_test_span_gre_dir gt6 ingress
+ mirror_install $swp1 ingress gt6 "matchall"
+ quick_test_span_gre_dir gt6
mirror_uninstall $swp1 ingress
- log_test "mirror to ip6gre with next-hop remote ($tcflags)"
-}
-
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
+ log_test "mirror to ip6gre with next-hop remote"
}
trap cleanup EXIT
@@ -118,14 +107,6 @@ trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh
index 88cecdb9a861..63689928cb51 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh
@@ -63,30 +63,11 @@ test_gretap()
full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index c8a9b5bd841f..1b902cc579f6 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -153,21 +153,21 @@ test_span_gre_forbidden_cpu()
RET=0
# Run the pass-test first, to prime neighbor table.
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
# Now forbid the VLAN at the bridge and see it fail.
bridge vlan del dev br1 vid 555 self
sleep 1
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
bridge vlan add dev br1 vid 555 self
sleep 1
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: vlan forbidden at a bridge ($tcflags)"
+ log_test "$what: vlan forbidden at a bridge"
}
test_gretap_forbidden_cpu()
@@ -187,22 +187,22 @@ test_span_gre_forbidden_egress()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
bridge vlan del dev $swp3 vid 555
sleep 1
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
bridge vlan add dev $swp3 vid 555
# Re-prime FDB
$ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: vlan forbidden at a bridge egress ($tcflags)"
+ log_test "$what: vlan forbidden at a bridge egress"
}
test_gretap_forbidden_egress()
@@ -223,30 +223,30 @@ test_span_gre_untagged_egress()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ mirror_install $swp1 ingress $tundev "matchall"
- quick_test_span_gre_dir $tundev ingress
- quick_test_span_vlan_dir $h3 555 ingress "$ul_proto"
+ quick_test_span_gre_dir $tundev
+ quick_test_span_vlan_dir $h3 555 "$ul_proto"
h3_addr_add_del del $h3.555
bridge vlan add dev $swp3 vid 555 pvid untagged
h3_addr_add_del add $h3
sleep 5
- quick_test_span_gre_dir $tundev ingress
- fail_test_span_vlan_dir $h3 555 ingress "$ul_proto"
+ quick_test_span_gre_dir $tundev
+ fail_test_span_vlan_dir $h3 555 "$ul_proto"
h3_addr_add_del del $h3
bridge vlan add dev $swp3 vid 555
h3_addr_add_del add $h3.555
sleep 5
- quick_test_span_gre_dir $tundev ingress
- quick_test_span_vlan_dir $h3 555 ingress "$ul_proto"
+ quick_test_span_gre_dir $tundev
+ quick_test_span_vlan_dir $h3 555 "$ul_proto"
mirror_uninstall $swp1 ingress
- log_test "$what: vlan untagged at a bridge egress ($tcflags)"
+ log_test "$what: vlan untagged at a bridge egress"
}
test_gretap_untagged_egress()
@@ -267,19 +267,19 @@ test_span_gre_fdb_roaming()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
- quick_test_span_gre_dir $tundev ingress
+ mirror_install $swp1 ingress $tundev "matchall"
+ quick_test_span_gre_dir $tundev
while ((RET == 0)); do
bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
bridge fdb add dev $swp2 $h3mac vlan 555 master static
sleep 1
- fail_test_span_gre_dir $tundev ingress
+ fail_test_span_gre_dir $tundev
if ! bridge fdb sh dev $swp2 vlan 555 master \
| grep -q $h3mac; then
printf "TEST: %-60s [RETRY]\n" \
- "$what: MAC roaming ($tcflags)"
+ "$what: MAC roaming"
# ARP or ND probably reprimed the FDB while the test
# was running. We would get a spurious failure.
RET=0
@@ -292,11 +292,11 @@ test_span_gre_fdb_roaming()
# Re-prime FDB
$ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
- quick_test_span_gre_dir $tundev ingress
+ quick_test_span_gre_dir $tundev
mirror_uninstall $swp1 ingress
- log_test "$what: MAC roaming ($tcflags)"
+ log_test "$what: MAC roaming"
}
test_gretap_fdb_roaming()
@@ -319,30 +319,11 @@ test_ip6gretap_stp()
full_test_span_gre_stp gt6 $swp3 "mirror to ip6gretap"
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
-
- tests_run
-
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index 3e8ebeff3019..6bf9d5ae933c 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -44,14 +44,17 @@ mirror_test()
local type="icmp echoreq"
fi
+ if [[ -z ${expect//[[:digit:]]/} ]]; then
+ expect="== $expect"
+ fi
+
local t0=$(tc_rule_stats_get $dev $pref)
$MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
-c 10 -d 100msec -t $type
sleep 0.5
local t1=$(tc_rule_stats_get $dev $pref)
local delta=$((t1 - t0))
- # Tolerate a couple stray extra packets.
- ((expect <= delta && delta <= expect + 2))
+ ((delta $expect))
check_err $? "Expected to capture $expect packets, got $delta."
}
@@ -59,36 +62,42 @@ do_test_span_dir_ips()
{
local expect=$1; shift
local dev=$1; shift
- local direction=$1; shift
local ip1=$1; shift
local ip2=$1; shift
+ local forward_type=${1-8}; shift
+ local backward_type=${1-0}; shift
- icmp_capture_install $dev
+ icmp_capture_install $dev "type $forward_type"
mirror_test v$h1 $ip1 $ip2 $dev 100 $expect
+ icmp_capture_uninstall $dev
+
+ icmp_capture_install $dev "type $backward_type"
mirror_test v$h2 $ip2 $ip1 $dev 100 $expect
icmp_capture_uninstall $dev
}
quick_test_span_dir_ips()
{
- do_test_span_dir_ips 10 "$@"
-}
+ local dev=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local forward_type=${1-8}; shift
+ local backward_type=${1-0}; shift
-fail_test_span_dir_ips()
-{
- do_test_span_dir_ips 0 "$@"
+ do_test_span_dir_ips 10 "$dev" "$ip1" "$ip2" \
+ "$forward_type" "$backward_type"
}
test_span_dir_ips()
{
local dev=$1; shift
- local direction=$1; shift
local forward_type=$1; shift
local backward_type=$1; shift
local ip1=$1; shift
local ip2=$1; shift
- quick_test_span_dir_ips "$dev" "$direction" "$ip1" "$ip2"
+ quick_test_span_dir_ips "$dev" "$ip1" "$ip2" \
+ "$forward_type" "$backward_type"
icmp_capture_install $dev "type $forward_type"
mirror_test v$h1 $ip1 $ip2 $dev 100 10
@@ -99,14 +108,14 @@ test_span_dir_ips()
icmp_capture_uninstall $dev
}
-fail_test_span_dir()
-{
- fail_test_span_dir_ips "$@" 192.0.2.1 192.0.2.2
-}
-
test_span_dir()
{
- test_span_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local dev=$1; shift
+ local forward_type=$1; shift
+ local backward_type=$1; shift
+
+ test_span_dir_ips "$dev" "$forward_type" "$backward_type" \
+ 192.0.2.1 192.0.2.2
}
do_test_span_vlan_dir_ips()
@@ -114,7 +123,6 @@ do_test_span_vlan_dir_ips()
local expect=$1; shift
local dev=$1; shift
local vid=$1; shift
- local direction=$1; shift
local ul_proto=$1; shift
local ip1=$1; shift
local ip2=$1; shift
@@ -123,27 +131,50 @@ do_test_span_vlan_dir_ips()
# The traffic is meant for local box anyway, so will be trapped to
# kernel.
vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype $ul_proto"
- mirror_test v$h1 $ip1 $ip2 $dev 100 $expect
- mirror_test v$h2 $ip2 $ip1 $dev 100 $expect
+ mirror_test v$h1 $ip1 $ip2 $dev 100 "$expect"
+ mirror_test v$h2 $ip2 $ip1 $dev 100 "$expect"
vlan_capture_uninstall $dev
}
quick_test_span_vlan_dir_ips()
{
- do_test_span_vlan_dir_ips 10 "$@"
+ local dev=$1; shift
+ local vid=$1; shift
+ local ul_proto=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+
+ do_test_span_vlan_dir_ips '>= 10' "$dev" "$vid" "$ul_proto" \
+ "$ip1" "$ip2"
}
fail_test_span_vlan_dir_ips()
{
- do_test_span_vlan_dir_ips 0 "$@"
+ local dev=$1; shift
+ local vid=$1; shift
+ local ul_proto=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+
+ do_test_span_vlan_dir_ips 0 "$dev" "$vid" "$ul_proto" "$ip1" "$ip2"
}
quick_test_span_vlan_dir()
{
- quick_test_span_vlan_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local dev=$1; shift
+ local vid=$1; shift
+ local ul_proto=$1; shift
+
+ quick_test_span_vlan_dir_ips "$dev" "$vid" "$ul_proto" \
+ 192.0.2.1 192.0.2.2
}
fail_test_span_vlan_dir()
{
- fail_test_span_vlan_dir_ips "$@" 192.0.2.1 192.0.2.2
+ local dev=$1; shift
+ local vid=$1; shift
+ local ul_proto=$1; shift
+
+ fail_test_span_vlan_dir_ips "$dev" "$vid" "$ul_proto" \
+ 192.0.2.1 192.0.2.2
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_vlan.sh
index 0b44e148235e..2f150a414d38 100755
--- a/tools/testing/selftests/net/forwarding/mirror_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_vlan.sh
@@ -40,12 +40,16 @@ setup_prepare()
vlan_create $h2 111 v$h2 192.0.2.18/28
bridge vlan add dev $swp2 vid 111
+
+ trap_install $h3 ingress
}
cleanup()
{
pre_cleanup
+ trap_uninstall $h3 ingress
+
vlan_destroy $h2 111
vlan_destroy $h1 111
vlan_destroy $h3 555
@@ -63,11 +67,11 @@ test_vlan_dir()
RET=0
- mirror_install $swp1 $direction $swp3.555 "matchall $tcflags"
- test_span_dir "$h3.555" "$direction" "$forward_type" "$backward_type"
+ mirror_install $swp1 $direction $swp3.555 "matchall"
+ test_span_dir "$h3.555" "$forward_type" "$backward_type"
mirror_uninstall $swp1 $direction
- log_test "$direction mirror to vlan ($tcflags)"
+ log_test "$direction mirror to vlan"
}
test_vlan()
@@ -84,14 +88,12 @@ test_tagged_vlan_dir()
RET=0
- mirror_install $swp1 $direction $swp3.555 "matchall $tcflags"
- do_test_span_vlan_dir_ips 10 "$h3.555" 111 "$direction" ip \
- 192.0.2.17 192.0.2.18
- do_test_span_vlan_dir_ips 0 "$h3.555" 555 "$direction" ip \
- 192.0.2.17 192.0.2.18
+ mirror_install $swp1 $direction $swp3.555 "matchall"
+ do_test_span_vlan_dir_ips '>= 10' "$h3.555" 111 ip 192.0.2.17 192.0.2.18
+ do_test_span_vlan_dir_ips 0 "$h3.555" 555 ip 192.0.2.17 192.0.2.18
mirror_uninstall $swp1 $direction
- log_test "$direction mirror tagged to vlan ($tcflags)"
+ log_test "$direction mirror tagged to vlan"
}
test_tagged_vlan()
@@ -100,32 +102,11 @@ test_tagged_vlan()
test_tagged_vlan_dir egress 0 8
}
-test_all()
-{
- slow_path_trap_install $swp1 ingress
- slow_path_trap_install $swp1 egress
- trap_install $h3 ingress
-
- tests_run
-
- trap_uninstall $h3 ingress
- slow_path_trap_uninstall $swp1 egress
- slow_path_trap_uninstall $swp1 ingress
-}
-
trap cleanup EXIT
setup_prepare
setup_wait
-tcflags="skip_hw"
-test_all
-
-if ! tc_offload_check; then
- echo "WARN: Could not test offloaded functionality"
-else
- tcflags="skip_sw"
- test_all
-fi
+tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_seed.sh b/tools/testing/selftests/net/forwarding/router_mpath_seed.sh
new file mode 100755
index 000000000000..314cb906c1eb
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_mpath_seed.sh
@@ -0,0 +1,333 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-------------------------+ +-------------------------+
+# | H1 | | H2 |
+# | $h1 + | | + $h2 |
+# | 192.0.2.1/28 | | | | 192.0.2.34/28 |
+# | 2001:db8:1::1/64 | | | | 2001:db8:3::2/64 |
+# +-------------------|-----+ +-|-----------------------+
+# | |
+# +-------------------|-----+ +-|-----------------------+
+# | R1 | | | | R2 |
+# | $rp11 + | | + $rp21 |
+# | 192.0.2.2/28 | | 192.0.2.33/28 |
+# | 2001:db8:1::2/64 | | 2001:db8:3::1/64 |
+# | | | |
+# | $rp12 + | | + $rp22 |
+# | 192.0.2.17/28 | | | | 192.0.2.18..27/28 |
+# | 2001:db8:2::17/64 | | | | 2001:db8:2::18..27/64 |
+# +-------------------|-----+ +-|-----------------------+
+# | |
+# `----------'
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ test_mpath_seed_stability_ipv4
+ test_mpath_seed_stability_ipv6
+ test_mpath_seed_get
+ test_mpath_seed_ipv4
+ test_mpath_seed_ipv6
+"
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip -4 route add 192.0.2.32/28 vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add 2001:db8:3::/64 vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del 2001:db8:3::/64 vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del 192.0.2.32/28 vrf v$h1 nexthop via 192.0.2.2
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.34/28 2001:db8:3::2/64
+ ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.33
+ ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:3::1
+}
+
+h2_destroy()
+{
+ ip -6 route del 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:3::1
+ ip -4 route del 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.33
+ simple_if_fini $h2 192.0.2.34/28 2001:db8:3::2/64
+}
+
+router1_create()
+{
+ simple_if_init $rp11 192.0.2.2/28 2001:db8:1::2/64
+ __simple_if_init $rp12 v$rp11 192.0.2.17/28 2001:db8:2::17/64
+}
+
+router1_destroy()
+{
+ __simple_if_fini $rp12 192.0.2.17/28 2001:db8:2::17/64
+ simple_if_fini $rp11 192.0.2.2/28 2001:db8:1::2/64
+}
+
+router2_create()
+{
+ simple_if_init $rp21 192.0.2.33/28 2001:db8:3::1/64
+ __simple_if_init $rp22 v$rp21 192.0.2.18/28 2001:db8:2::18/64
+ ip -4 route add 192.0.2.0/28 vrf v$rp21 nexthop via 192.0.2.17
+ ip -6 route add 2001:db8:1::/64 vrf v$rp21 nexthop via 2001:db8:2::17
+}
+
+router2_destroy()
+{
+ ip -6 route del 2001:db8:1::/64 vrf v$rp21 nexthop via 2001:db8:2::17
+ ip -4 route del 192.0.2.0/28 vrf v$rp21 nexthop via 192.0.2.17
+ __simple_if_fini $rp22 192.0.2.18/28 2001:db8:2::18/64
+ simple_if_fini $rp21 192.0.2.33/28 2001:db8:3::1/64
+}
+
+nexthops_create()
+{
+ local i
+ for i in $(seq 10); do
+ ip nexthop add id $((1000 + i)) via 192.0.2.18 dev $rp12
+ ip nexthop add id $((2000 + i)) via 2001:db8:2::18 dev $rp12
+ done
+
+ ip nexthop add id 1000 group $(seq -s / 1001 1010) hw_stats on
+ ip nexthop add id 2000 group $(seq -s / 2001 2010) hw_stats on
+ ip -4 route add 192.0.2.32/28 vrf v$rp11 nhid 1000
+ ip -6 route add 2001:db8:3::/64 vrf v$rp11 nhid 2000
+}
+
+nexthops_destroy()
+{
+ local i
+
+ ip -6 route del 2001:db8:3::/64 vrf v$rp11 nhid 2000
+ ip -4 route del 192.0.2.32/28 vrf v$rp11 nhid 1000
+ ip nexthop del id 2000
+ ip nexthop del id 1000
+
+ for i in $(seq 10 -1 1); do
+ ip nexthop del id $((2000 + i))
+ ip nexthop del id $((1000 + i))
+ done
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp11=${NETIFS[p2]}
+
+ rp12=${NETIFS[p3]}
+ rp22=${NETIFS[p4]}
+
+ rp21=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ sysctl_save net.ipv4.fib_multipath_hash_seed
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ router1_create
+ router2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ nexthops_destroy
+ router2_destroy
+ router1_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+
+ sysctl_restore net.ipv4.fib_multipath_hash_seed
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.34
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:3::2
+}
+
+test_mpath_seed_get()
+{
+ RET=0
+
+ local i
+ for ((i = 0; i < 100; i++)); do
+ local seed_w=$((999331 * i))
+ sysctl -qw net.ipv4.fib_multipath_hash_seed=$seed_w
+ local seed_r=$(sysctl -n net.ipv4.fib_multipath_hash_seed)
+ ((seed_r == seed_w))
+ check_err $? "mpath seed written as $seed_w, but read as $seed_r"
+ done
+
+ log_test "mpath seed set/get"
+}
+
+nh_stats_snapshot()
+{
+ local group_id=$1; shift
+
+ ip -j -s -s nexthop show id $group_id |
+ jq -c '[.[].group_stats | sort_by(.id) | .[].packets]'
+}
+
+get_active_nh()
+{
+ local s0=$1; shift
+ local s1=$1; shift
+
+ jq -n --argjson s0 "$s0" --argjson s1 "$s1" -f /dev/stdin <<-"EOF"
+ [range($s0 | length)] |
+ map($s1[.] - $s0[.]) |
+ map(if . > 8 then 1 else 0 end) |
+ index(1)
+ EOF
+}
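+# get_active_nh example (illustrative counters): with snapshots s0='[0,0,12]'
+# and s1='[0,0,24]' the jq program above prints 2, i.e. the third nexthop
+# absorbed the 10-packet probe (any delta above 8 marks the active NH).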
+
+probe_nh()
+{
+ local group_id=$1; shift
+ local -a mz=("$@")
+
+ local s0=$(nh_stats_snapshot $group_id)
+ "${mz[@]}"
+ local s1=$(nh_stats_snapshot $group_id)
+
+ get_active_nh "$s0" "$s1"
+}
+
+probe_seed()
+{
+ local group_id=$1; shift
+ local seed=$1; shift
+ local -a mz=("$@")
+
+ sysctl -qw net.ipv4.fib_multipath_hash_seed=$seed
+ probe_nh "$group_id" "${mz[@]}"
+}
+
+test_mpath_seed()
+{
+ local group_id=$1; shift
+ local what=$1; shift
+ local -a mz=("$@")
+ local ii
+
+ RET=0
+
+ local -a tally=(0 0 0 0 0 0 0 0 0 0)
+ for ((ii = 0; ii < 100; ii++)); do
+ local act=$(probe_seed $group_id $((999331 * ii)) "${mz[@]}")
+ ((tally[act]++))
+ done
+
+ local tally_str="${tally[@]}"
+ for ((ii = 0; ii < ${#tally[@]}; ii++)); do
+ ((tally[ii] > 0))
+ check_err $? "NH #$ii not hit, tally='$tally_str'"
+ done
+
+ log_test "mpath seed $what"
+ sysctl -qw net.ipv4.fib_multipath_hash_seed=0
+}
+
+test_mpath_seed_ipv4()
+{
+ test_mpath_seed 1000 IPv4 \
+ $MZ $h1 -A 192.0.2.1 -B 192.0.2.34 -q \
+ -p 64 -d 0 -c 10 -t udp
+}
+
+test_mpath_seed_ipv6()
+{
+ test_mpath_seed 2000 IPv6 \
+ $MZ -6 $h1 -A 2001:db8:1::1 -B 2001:db8:3::2 -q \
+ -p 64 -d 0 -c 10 -t udp
+}
+
+check_mpath_seed_stability()
+{
+ local seed=$1; shift
+ local act_0=$1; shift
+ local act_1=$1; shift
+
+ ((act_0 == act_1))
+ check_err $? "seed $seed: active NH moved from $act_0 to $act_1 after seed change"
+}
+
+test_mpath_seed_stability()
+{
+ local group_id=$1; shift
+ local what=$1; shift
+ local -a mz=("$@")
+
+ RET=0
+
+ local seed_0=0
+ local seed_1=3221338814
+ local seed_2=3735928559
+
+ # Initial active NH before touching the seed at all.
+ local act_ini=$(probe_nh $group_id "${mz[@]}")
+
+ local act_0_0=$(probe_seed $group_id $seed_0 "${mz[@]}")
+ local act_1_0=$(probe_seed $group_id $seed_1 "${mz[@]}")
+ local act_2_0=$(probe_seed $group_id $seed_2 "${mz[@]}")
+
+ local act_0_1=$(probe_seed $group_id $seed_0 "${mz[@]}")
+ local act_1_1=$(probe_seed $group_id $seed_1 "${mz[@]}")
+ local act_2_1=$(probe_seed $group_id $seed_2 "${mz[@]}")
+
+ check_mpath_seed_stability initial $act_ini $act_0_0
+ check_mpath_seed_stability $seed_0 $act_0_0 $act_0_1
+ check_mpath_seed_stability $seed_1 $act_1_0 $act_1_1
+ check_mpath_seed_stability $seed_2 $act_2_0 $act_2_1
+
+ log_test "mpath seed stability $what"
+ sysctl -qw net.ipv4.fib_multipath_hash_seed=0
+}
+
+test_mpath_seed_stability_ipv4()
+{
+ test_mpath_seed_stability 1000 IPv4 \
+ $MZ $h1 -A 192.0.2.1 -B 192.0.2.34 -q \
+ -p 64 -d 0 -c 10 -t udp
+}
+
+test_mpath_seed_stability_ipv6()
+{
+ test_mpath_seed_stability 2000 IPv6 \
+ $MZ -6 $h1 -A 2001:db8:1::1 -B 2001:db8:3::2 -q \
+ -p 64 -d 0 -c 10 -t udp
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+nexthops_create
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index 6f0a2e452ba1..3f9d50f1ef9e 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -680,9 +680,9 @@ test_learning()
local mac=de:ad:be:ef:13:37
local dst=192.0.2.100
- # Enable learning on the VxLAN device and set ageing time to 10 seconds
- ip link set dev br1 type bridge ageing_time 1000
- ip link set dev vx1 type vxlan ageing 10
+ # Enable learning on the VxLAN device and set ageing time to 30 seconds
+ ip link set dev br1 type bridge ageing_time 3000
+ ip link set dev vx1 type vxlan ageing 30
ip link set dev vx1 type vxlan learning
reapply_config
@@ -740,7 +740,7 @@ test_learning()
vxlan_flood_test $mac $dst 0 10 0
- sleep 20
+ sleep 60
bridge fdb show brport vx1 | grep $mac | grep -q self
check_fail $?
diff --git a/tools/testing/selftests/net/hsr/hsr_ping.sh b/tools/testing/selftests/net/hsr/hsr_ping.sh
index 3684b813b0f6..f5d207fc770a 100755
--- a/tools/testing/selftests/net/hsr/hsr_ping.sh
+++ b/tools/testing/selftests/net/hsr/hsr_ping.sh
@@ -152,6 +152,15 @@ setup_hsr_interfaces()
ip -net "$ns3" addr add 100.64.0.3/24 dev hsr3
ip -net "$ns3" addr add dead:beef:1::3/64 dev hsr3 nodad
+ ip -net "$ns1" link set address 00:11:22:00:01:01 dev ns1eth1
+ ip -net "$ns1" link set address 00:11:22:00:01:02 dev ns1eth2
+
+ ip -net "$ns2" link set address 00:11:22:00:02:01 dev ns2eth1
+ ip -net "$ns2" link set address 00:11:22:00:02:02 dev ns2eth2
+
+ ip -net "$ns3" link set address 00:11:22:00:03:01 dev ns3eth1
+ ip -net "$ns3" link set address 00:11:22:00:03:02 dev ns3eth2
+
# All Links up
ip -net "$ns1" link set ns1eth1 up
ip -net "$ns1" link set ns1eth2 up
diff --git a/tools/testing/selftests/net/hsr/hsr_redbox.sh b/tools/testing/selftests/net/hsr/hsr_redbox.sh
index 1f36785347c0..998103502d5d 100755
--- a/tools/testing/selftests/net/hsr/hsr_redbox.sh
+++ b/tools/testing/selftests/net/hsr/hsr_redbox.sh
@@ -96,6 +96,21 @@ setup_hsr_interfaces()
ip -n "${ns4}" link set ns4eth1 up
ip -n "${ns5}" link set ns5eth1 up
+ ip -net "$ns1" link set address 00:11:22:00:01:01 dev ns1eth1
+ ip -net "$ns1" link set address 00:11:22:00:01:02 dev ns1eth2
+
+ ip -net "$ns2" link set address 00:11:22:00:02:01 dev ns2eth1
+ ip -net "$ns2" link set address 00:11:22:00:02:02 dev ns2eth2
+ ip -net "$ns2" link set address 00:11:22:00:02:03 dev ns2eth3
+
+ ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth1
+ ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth2
+ ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth3
+ ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3br1
+
+ ip -net "$ns4" link set address 00:11:22:00:04:01 dev ns4eth1
+ ip -net "$ns5" link set address 00:11:22:00:05:01 dev ns5eth1
+
ip -net "${ns1}" link add name hsr1 type hsr slave1 ns1eth1 slave2 ns1eth2 supervision 45 version ${HSRv} proto 0
ip -net "${ns2}" link add name hsr2 type hsr slave1 ns2eth1 slave2 ns2eth2 interlink ns2eth3 supervision 45 version ${HSRv} proto 0
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index 9155c914c064..d0219032f773 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -125,28 +125,36 @@ slowwait_for_counter()
slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@"
}
+remove_ns_list()
+{
+ local item=$1
+ local ns
+ local ns_list=("${NS_LIST[@]}")
+ NS_LIST=()
+
+ for ns in "${ns_list[@]}"; do
+ if [ "${ns}" != "${item}" ]; then
+ NS_LIST+=("${ns}")
+ fi
+ done
+}
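+# remove_ns_list example: with NS_LIST=(ns1 ns2 ns3), "remove_ns_list ns2"
+# leaves NS_LIST=(ns1 ns3).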
+
cleanup_ns()
{
local ns=""
- local errexit=0
local ret=0
- # disable errexit temporary
- if [[ $- =~ "e" ]]; then
- errexit=1
- set +e
- fi
-
for ns in "$@"; do
[ -z "${ns}" ] && continue
- ip netns delete "${ns}" &> /dev/null
+ ip netns delete "${ns}" &> /dev/null || true
if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
echo "Warn: Failed to remove namespace $ns"
ret=1
+ else
+ remove_ns_list "${ns}"
fi
done
- [ $errexit -eq 1 ] && set -e
return $ret
}
@@ -159,29 +167,30 @@ cleanup_all_ns()
# setup_ns local remote
setup_ns()
{
- local ns=""
local ns_name=""
local ns_list=()
- local ns_exist=
for ns_name in "$@"; do
+		# refuse names that clash with this function's own local variables
+ if [ "${ns_name}" = "ns_name" ]; then
+ echo "Failed to setup namespace '${ns_name}': invalid name"
+ cleanup_ns "${ns_list[@]}"
+ exit $ksft_fail
+ fi
+
# Some test may setup/remove same netns multi times
- if unset ${ns_name} 2> /dev/null; then
- ns="${ns_name,,}-$(mktemp -u XXXXXX)"
- eval readonly ${ns_name}="$ns"
- ns_exist=false
+ if [ -z "${!ns_name}" ]; then
+ eval "${ns_name}=${ns_name,,}-$(mktemp -u XXXXXX)"
else
- eval ns='$'${ns_name}
- cleanup_ns "$ns"
- ns_exist=true
+ cleanup_ns "${!ns_name}"
fi
- if ! ip netns add "$ns"; then
+ if ! ip netns add "${!ns_name}"; then
echo "Failed to create namespace $ns_name"
cleanup_ns "${ns_list[@]}"
return $ksft_skip
fi
- ip -n "$ns" link set lo up
- ! $ns_exist && ns_list+=("$ns")
+ ip -n "${!ns_name}" link set lo up
+ ns_list+=("${!ns_name}")
done
NS_LIST+=("${ns_list[@]}")
}
@@ -190,10 +199,10 @@ tc_rule_stats_get()
{
local dev=$1; shift
local pref=$1; shift
- local dir=$1; shift
+ local dir=${1:-ingress}; shift
local selector=${1:-.packets}; shift
- tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
+ tc -j -s filter show dev $dev $dir pref $pref \
| jq ".[1].options.actions[].stats$selector"
}
diff --git a/tools/testing/selftests/net/lib/py/ksft.py b/tools/testing/selftests/net/lib/py/ksft.py
index 4769b4eb1ea1..f26c20df9db4 100644
--- a/tools/testing/selftests/net/lib/py/ksft.py
+++ b/tools/testing/selftests/net/lib/py/ksft.py
@@ -6,6 +6,7 @@ import sys
import time
import traceback
from .consts import KSFT_MAIN_NAME
+from .utils import global_defer_queue
KSFT_RESULT = None
KSFT_RESULT_ALL = True
@@ -57,6 +58,11 @@ def ksft_ge(a, b, comment=""):
_fail("Check failed", a, "<", b, comment)
+def ksft_lt(a, b, comment=""):
+ if a >= b:
+ _fail("Check failed", a, ">=", b, comment)
+
+
class ksft_raises:
def __init__(self, expected_type):
self.exception = None
@@ -103,6 +109,24 @@ def ktap_result(ok, cnt=1, case="", comment=""):
print(res)
+def ksft_flush_defer():
+ global KSFT_RESULT
+
+ i = 0
+ qlen_start = len(global_defer_queue)
+ while global_defer_queue:
+ i += 1
+ entry = global_defer_queue.pop()
+ try:
+ entry.exec_only()
+ except:
+ ksft_pr(f"Exception while handling defer / cleanup (callback {i} of {qlen_start})!")
+ tb = traceback.format_exc()
+ for line in tb.strip().split('\n'):
+ ksft_pr("Defer Exception|", line)
+ KSFT_RESULT = False
+
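+# ksft_flush_defer() pops entries off the tail of global_defer_queue, so
+# cleanups run in reverse order of registration (last deferred, first run).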
+
def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
cases = cases or []
@@ -122,32 +146,41 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
global KSFT_RESULT
cnt = 0
+ stop = False
for case in cases:
KSFT_RESULT = True
cnt += 1
+ comment = ""
+ cnt_key = ""
+
try:
case(*args)
except KsftSkipEx as e:
- ktap_result(True, cnt, case, comment="SKIP " + str(e))
- totals['skip'] += 1
- continue
+ comment = "SKIP " + str(e)
+ cnt_key = 'skip'
except KsftXfailEx as e:
- ktap_result(True, cnt, case, comment="XFAIL " + str(e))
- totals['xfail'] += 1
- continue
- except Exception as e:
+ comment = "XFAIL " + str(e)
+ cnt_key = 'xfail'
+ except BaseException as e:
+ stop |= isinstance(e, KeyboardInterrupt)
tb = traceback.format_exc()
for line in tb.strip().split('\n'):
ksft_pr("Exception|", line)
- ktap_result(False, cnt, case)
- totals['fail'] += 1
- continue
-
- ktap_result(KSFT_RESULT, cnt, case)
- if KSFT_RESULT:
- totals['pass'] += 1
- else:
- totals['fail'] += 1
+ if stop:
+ ksft_pr("Stopping tests due to KeyboardInterrupt.")
+ KSFT_RESULT = False
+ cnt_key = 'fail'
+
+ ksft_flush_defer()
+
+ if not cnt_key:
+ cnt_key = 'pass' if KSFT_RESULT else 'fail'
+
+ ktap_result(KSFT_RESULT, cnt, case, comment=comment)
+ totals[cnt_key] += 1
+
+ if stop:
+ break
print(
f"# Totals: pass:{totals['pass']} fail:{totals['fail']} xfail:{totals['xfail']} xpass:0 skip:{totals['skip']} error:0"
diff --git a/tools/testing/selftests/net/lib/py/utils.py b/tools/testing/selftests/net/lib/py/utils.py
index 0540ea24921d..72590c3f90f1 100644
--- a/tools/testing/selftests/net/lib/py/utils.py
+++ b/tools/testing/selftests/net/lib/py/utils.py
@@ -1,12 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
+import errno
import json as _json
import random
import re
+import socket
import subprocess
import time
+class CmdExitFailure(Exception):
+ pass
+
+
class cmd:
def __init__(self, comm, shell=True, fail=True, ns=None, background=False, host=None, timeout=5):
if ns:
@@ -41,8 +47,8 @@ class cmd:
if self.proc.returncode != 0 and fail:
if len(stderr) > 0 and stderr[-1] == "\n":
stderr = stderr[:-1]
- raise Exception("Command failed: %s\nSTDOUT: %s\nSTDERR: %s" %
- (self.proc.args, stdout, stderr))
+ raise CmdExitFailure("Command failed: %s\nSTDOUT: %s\nSTDERR: %s" %
+ (self.proc.args, stdout, stderr))
class bkg(cmd):
@@ -60,6 +66,40 @@ class bkg(cmd):
return self.process(terminate=self.terminate, fail=self.check_fail)
+global_defer_queue = []
+
+
+class defer:
+ def __init__(self, func, *args, **kwargs):
+ global global_defer_queue
+
+ if not callable(func):
+ raise Exception("defer created with un-callable object, did you call the function instead of passing its name?")
+
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+
+ self._queue = global_defer_queue
+ self._queue.append(self)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, ex_type, ex_value, ex_tb):
+ return self.exec()
+
+ def exec_only(self):
+ self.func(*self.args, **self.kwargs)
+
+ def cancel(self):
+ self._queue.remove(self)
+
+ def exec(self):
+ self.cancel()
+ self.exec_only()
+
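+# A minimal defer usage sketch (the "ip link del" command is just an example
+# cleanup): deferred calls are flushed by ksft_run() at the end of a test
+# case, or can be driven manually:
+#
+#   d = defer(ip, "link del dev dummy0")   # queue the cleanup
+#   ...
+#   d.exec()     # run it now and remove it from the queue
+#   # or: d.cancel() to drop it without running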
+
def tool(name, args, json=None, ns=None, host=None):
cmd_str = name + ' '
if json:
@@ -77,11 +117,24 @@ def ip(args, json=None, ns=None, host=None):
return tool('ip', args, json=json, host=host)
+def ethtool(args, json=None, ns=None, host=None):
+ return tool('ethtool', args, json=json, ns=ns, host=host)
+
+
def rand_port():
"""
- Get unprivileged port, for now just random, one day we may decide to check if used.
+ Get a random unprivileged port, try to make sure it's not already used.
"""
- return random.randint(10000, 65535)
+ for _ in range(1000):
+ port = random.randint(10000, 65535)
+ try:
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ s.bind(("", port))
+ return port
+ except OSError as e:
+ if e.errno != errno.EADDRINUSE:
+ raise
+ raise Exception("Can't find any free unprivileged port")
def wait_port_listen(port, proto="tcp", ns=None, host=None, sleep=0.005, deadline=5):
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index 6ffa9b7a3260..438280e68434 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -1,6 +1,9 @@
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
+. "$(dirname "${0}")/../lib.sh"
+. "$(dirname "${0}")/../net_helper.sh"
+
readonly KSFT_PASS=0
readonly KSFT_FAIL=1
readonly KSFT_SKIP=4
@@ -361,20 +364,7 @@ mptcp_lib_check_transfer() {
# $1: ns, $2: port
mptcp_lib_wait_local_port_listen() {
- local listener_ns="${1}"
- local port="${2}"
-
- local port_hex
- port_hex="$(printf "%04X" "${port}")"
-
- local _
- for _ in $(seq 10); do
- ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
- awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) \
- {rc=0; exit}} END {exit rc}" &&
- break
- sleep 0.1
- done
+ wait_local_port_listen "${@}" "tcp"
}
mptcp_lib_check_output() {
@@ -438,17 +428,13 @@ mptcp_lib_check_tools() {
}
mptcp_lib_ns_init() {
- local sec rndh
-
- sec=$(date +%s)
- rndh=$(printf %x "${sec}")-$(mktemp -u XXXXXX)
+ if ! setup_ns "${@}"; then
+ mptcp_lib_pr_fail "Failed to setup namespaces ${*}"
+ exit ${KSFT_FAIL}
+ fi
local netns
for netns in "${@}"; do
- eval "${netns}=${netns}-${rndh}"
-
- ip netns add "${!netns}" || exit ${KSFT_SKIP}
- ip -net "${!netns}" link set lo up
ip netns exec "${!netns}" sysctl -q net.mptcp.enabled=1
ip netns exec "${!netns}" sysctl -q net.ipv4.conf.all.rp_filter=0
ip netns exec "${!netns}" sysctl -q net.ipv4.conf.default.rp_filter=0
@@ -456,9 +442,10 @@ mptcp_lib_ns_init() {
}
mptcp_lib_ns_exit() {
+ cleanup_ns "${@}"
+
local netns
for netns in "${@}"; do
- ip netns del "${netns}"
rm -f /tmp/"${netns}".{nstat,out}
done
}
diff --git a/tools/testing/selftests/net/netfilter/nft_concat_range.sh b/tools/testing/selftests/net/netfilter/nft_concat_range.sh
index 6d66240e149c..47088b005390 100755
--- a/tools/testing/selftests/net/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/net/netfilter/nft_concat_range.sh
@@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
net6_port_net6_port net_port_mac_proto_net"
# Reported bugs, also described by TYPE_ variables below
-BUGS="flush_remove_add reload"
+BUGS="flush_remove_add reload net_port_proto_match"
# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
@@ -371,6 +371,22 @@ race_repeat 0
perf_duration 0
"
+TYPE_net_port_proto_match="
+display net,port,proto
+type_spec ipv4_addr . inet_service . inet_proto
+chain_spec ip daddr . udp dport . meta l4proto
+dst addr4 port proto
+src
+start 1
+count 9
+src_delta 9
+tools sendip bash
+proto udp
+
+race_repeat 0
+
+perf_duration 0
+"
# Set template for all tests, types and rules are filled in depending on test
set_template='
flush ruleset
@@ -1555,6 +1571,64 @@ test_bug_reload() {
nft flush ruleset
}
+# - add ranged element, check that packets match it
+# - delete element again, check it is gone
+test_bug_net_port_proto_match() {
+ setup veth send_"${proto}" set || return ${ksft_skip}
+ rstart=${start}
+
+ range_size=1
+ for i in $(seq 1 10); do
+ for j in $(seq 1 20) ; do
+ elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
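+			# e.g. i=1, j=2 expands to "10.1.2.0/24 . 11-20 . 6-17 "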
+
+ nft "add element inet filter test { $elem }" || return 1
+ nft "get element inet filter test { $elem }" | grep -q "$elem"
+ if [ $? -ne 0 ];then
+ local got=$(nft "get element inet filter test { $elem }")
+ err "post-add: should have returned $elem but got $got"
+ return 1
+ fi
+ done
+ done
+
+ # recheck after set was filled
+ for i in $(seq 1 10); do
+ for j in $(seq 1 20) ; do
+ elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+
+ nft "get element inet filter test { $elem }" | grep -q "$elem"
+ if [ $? -ne 0 ];then
+ local got=$(nft "get element inet filter test { $elem }")
+ err "post-fill: should have returned $elem but got $got"
+ return 1
+ fi
+ done
+ done
+
+ # random del and re-fetch
+ for i in $(seq 1 10); do
+ for j in $(seq 1 20) ; do
+ local rnd=$((RANDOM%10))
+ local got=""
+
+ elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+ if [ $rnd -gt 0 ];then
+ continue
+ fi
+
+ nft "delete element inet filter test { $elem }"
+ got=$(nft "get element inet filter test { $elem }" 2>/dev/null)
+ if [ $? -eq 0 ];then
+ err "post-delete: query for $elem returned $got instead of error."
+ return 1
+ fi
+ done
+ done
+
+ nft flush ruleset
+}
+
test_reported_issues() {
eval test_bug_"${subtest}"
}
diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh
index 8538f08c64c2..c61d23a8c88d 100755
--- a/tools/testing/selftests/net/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/net/netfilter/nft_queue.sh
@@ -375,6 +375,42 @@ EOF
wait 2>/dev/null
}
+test_queue_removal()
+{
+ read tainted_then < /proc/sys/kernel/tainted
+
+ ip netns exec "$ns1" nft -f - <<EOF
+flush ruleset
+table ip filter {
+ chain output {
+ type filter hook output priority 0; policy accept;
+ ip protocol icmp queue num 0
+ }
+}
+EOF
+ ip netns exec "$ns1" ./nf_queue -q 0 -d 30000 -t "$timeout" &
+ local nfqpid=$!
+
+ busywait "$BUSYWAIT_TIMEOUT" nf_queue_wait "$ns1" 0
+
+ ip netns exec "$ns1" ping -w 2 -f -c 10 127.0.0.1 -q >/dev/null
+ kill $nfqpid
+
+ ip netns exec "$ns1" nft flush ruleset
+
+ if [ "$tainted_then" -ne 0 ];then
+ return
+ fi
+
+ read tainted_now < /proc/sys/kernel/tainted
+ if [ "$tainted_now" -eq 0 ];then
+ echo "PASS: queue program exiting while packets queued"
+ else
+ echo "TAINT: queue program exiting while packets queued"
+ ret=1
+ fi
+}
+
ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
@@ -413,5 +449,6 @@ test_tcp_localhost
test_tcp_localhost_connectclose
test_tcp_localhost_requeue
test_icmp_vrf
+test_queue_removal
exit $ret
diff --git a/tools/testing/selftests/net/netns-sysctl.sh b/tools/testing/selftests/net/netns-sysctl.sh
new file mode 100755
index 000000000000..45c34a3b9aae
--- /dev/null
+++ b/tools/testing/selftests/net/netns-sysctl.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test checks that the network buffer sysctls are present
+# in a network namespace, and that they are read-only.
+
+source lib.sh
+
+cleanup() {
+ cleanup_ns $test_ns
+}
+
+trap cleanup EXIT
+
+fail() {
+ echo "ERROR: $*" >&2
+ exit 1
+}
+
+setup_ns test_ns
+
+for sc in {r,w}mem_{default,max}; do
+ # check that this is writable in a netns
+ [ -w "/proc/sys/net/core/$sc" ] ||
+ fail "$sc isn't writable in the init netns!"
+
+ # change the value in the host netns
+ sysctl -qw "net.core.$sc=300000" ||
+ fail "Can't write $sc in init netns!"
+
+	# check that the value set in the init netns is visible inside the netns
+ [ "$(ip netns exec $test_ns sysctl -n "net.core.$sc")" -eq 300000 ] ||
+ fail "Value for $sc mismatch!"
+
+ # check that this isn't writable in a netns
+ ip netns exec $test_ns [ -w "/proc/sys/net/core/$sc" ] &&
+ fail "$sc is writable in a netns!"
+done
+
+echo 'Test passed OK'
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index 15bca0708717..cc0bfae2bafa 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -11,6 +11,11 @@ ksft_skip=4
PAUSE_ON_FAIL=no
VERBOSE=0
TRACING=0
+WAIT_TIMEOUT=5
+
+if test "X$KSFT_MACHINE_SLOW" == "Xyes"; then
+ WAIT_TIMEOUT=10
+fi
tests="
arp_ping eth-arp: Basic arp ping between two NS
@@ -20,10 +25,37 @@ tests="
nat_related_v4 ip4-nat-related: ICMP related matches work with SNAT
netlink_checks ovsnl: validate netlink attrs and settings
upcall_interfaces ovs: test the upcall interfaces
- drop_reason drop: test drop reasons are emitted"
+ drop_reason drop: test drop reasons are emitted
+ psample psample: Sampling packets with psample"
info() {
- [ $VERBOSE = 0 ] || echo $*
+ [ "${ovs_dir}" != "" ] &&
+ echo "`date +"[%m-%d %H:%M:%S]"` $*" >> ${ovs_dir}/debug.log
+ [ $VERBOSE = 0 ] || echo $*
+}
+
+ovs_wait() {
+ info "waiting $WAIT_TIMEOUT s for: $@"
+
+ if "$@" ; then
+ info "wait succeeded immediately"
+ return 0
+ fi
+
+	# A quick re-check after a short sleep helps catch conditions that clear
+	# almost immediately. Fractional sleeps may not be supported everywhere,
+	# though, so fall back to whole-second sleeps if needed.
+ local start=0
+ sleep 0.1 || { sleep 1; start=1; }
+
+ for (( i=start; i<WAIT_TIMEOUT; i++ )); do
+ if "$@" ; then
+ info "wait succeeded after $i seconds"
+ return 0
+ fi
+ sleep 1
+ done
+ info "wait failed after $i seconds"
+ return 1
}
ovs_base=`pwd`
@@ -65,7 +97,8 @@ ovs_setenv() {
ovs_sbx() {
if test "X$2" != X; then
- (ovs_setenv $1; shift; "$@" >> ${ovs_dir}/debug.log)
+ (ovs_setenv $1; shift;
+ info "run cmd: $@"; "$@" >> ${ovs_dir}/debug.log)
else
ovs_setenv $1
fi
@@ -102,12 +135,21 @@ ovs_netns_spawn_daemon() {
shift
netns=$1
shift
- info "spawning cmd: $*"
- ip netns exec $netns $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr &
+ if [ "$netns" == "_default" ]; then
+ $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr &
+ else
+ ip netns exec $netns $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr &
+ fi
pid=$!
ovs_sbx "$sbx" on_exit "kill -TERM $pid 2>/dev/null"
}
+ovs_spawn_daemon() {
+ sbx=$1
+ shift
+ ovs_netns_spawn_daemon $sbx "_default" $*
+}
+
ovs_add_netns_and_veths () {
info "Adding netns attached: sbx:$1 dp:$2 {$3, $4, $5}"
ovs_sbx "$1" ip netns add "$3" || return 1
@@ -139,7 +181,7 @@ ovs_add_flow () {
info "Adding flow to DP: sbx:$1 br:$2 flow:$3 act:$4"
ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-flow "$2" "$3" "$4"
if [ $? -ne 0 ]; then
- echo "Flow [ $3 : $4 ] failed" >> ${ovs_dir}/debug.log
+ info "Flow [ $3 : $4 ] failed"
return 1
fi
return 0
@@ -170,6 +212,19 @@ ovs_drop_reason_count()
return `echo "$perf_output" | grep "$pattern" | wc -l`
}
+ovs_test_flow_fails () {
+ ERR_MSG="Flow actions may not be safe on all matching packets"
+
+ PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
+	ovs_add_flow $@ &> /dev/null && return 1
+ POST_TEST=$(dmesg | grep -c "${ERR_MSG}")
+
+ if [ "$PRE_TEST" == "$POST_TEST" ]; then
+ return 1
+ fi
+ return 0
+}
+
usage() {
echo
echo "$0 [OPTIONS] [TEST]..."
@@ -184,6 +239,91 @@ usage() {
exit 1
}
+
+# psample test
+# - use psample to observe packets
+test_psample() {
+ sbx_add "test_psample" || return $?
+
+ # Add a datapath with per-vport dispatching.
+ ovs_add_dp "test_psample" psample -V 2:1 || return 1
+
+ info "create namespaces"
+ ovs_add_netns_and_veths "test_psample" "psample" \
+ client c0 c1 172.31.110.10/24 -u || return 1
+ ovs_add_netns_and_veths "test_psample" "psample" \
+ server s0 s1 172.31.110.20/24 -u || return 1
+
+ # Check if psample actions can be configured.
+ ovs_add_flow "test_psample" psample \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' 'psample(group=1)' &> /dev/null
+ if [ $? == 1 ]; then
+ info "no support for psample - skipping"
+ ovs_exit_sig
+ return $ksft_skip
+ fi
+
+ ovs_del_flows "test_psample" psample
+
+ # Test action verification.
+ OLDIFS=$IFS
+ IFS='*'
+ min_key='in_port(1),eth(),eth_type(0x0800),ipv4()'
+ for testcase in \
+ "cookie to large"*"psample(group=1,cookie=1615141312111009080706050403020100)" \
+ "no group with cookie"*"psample(cookie=abcd)" \
+ "no group"*"psample()";
+ do
+ set -- $testcase;
+ ovs_test_flow_fails "test_psample" psample $min_key $2
+ if [ $? == 1 ]; then
+ info "failed - $1"
+ return 1
+ fi
+ done
+ IFS=$OLDIFS
+
+ ovs_del_flows "test_psample" psample
+ # Allow ARP
+ ovs_add_flow "test_psample" psample \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' '2' || return 1
+ ovs_add_flow "test_psample" psample \
+ 'in_port(2),eth(),eth_type(0x0806),arp()' '1' || return 1
+
+ # Sample first 14 bytes of all traffic.
+ ovs_add_flow "test_psample" psample \
+ "in_port(1),eth(),eth_type(0x0800),ipv4()" \
+ "trunc(14),psample(group=1,cookie=c0ffee),2"
+
+ # Sample all traffic. In this case, use a sample() action with both
+ # psample and an upcall emulating simultaneous local sampling and
+ # sFlow / IPFIX.
+ nlpid=$(grep -E "listening on upcall packet handler" \
+ $ovs_dir/s0.out | cut -d ":" -f 2 | tr -d ' ')
+
+ ovs_add_flow "test_psample" psample \
+ "in_port(2),eth(),eth_type(0x0800),ipv4()" \
+ "sample(sample=100%,actions(psample(group=2,cookie=eeff0c),userspace(pid=${nlpid},userdata=eeff0c))),1"
+
+ # Record psample data.
+ ovs_spawn_daemon "test_psample" python3 $ovs_base/ovs-dpctl.py psample-events
+ ovs_wait grep -q "listening for psample events" ${ovs_dir}/stdout
+
+ # Send a single ping.
+ ovs_sbx "test_psample" ip netns exec client ping -I c1 172.31.110.20 -c 1 || return 1
+
+ # We should have received one userspace action upcall and 2 psample packets.
+ ovs_wait grep -q "userspace action command" $ovs_dir/s0.out || return 1
+
+ # client -> server samples should only contain the first 14 bytes of the packet.
+ ovs_wait grep -qE "rate:4294967295,group:1,cookie:c0ffee data:[0-9a-f]{28}$" \
+ $ovs_dir/stdout || return 1
+
+ ovs_wait grep -q "rate:4294967295,group:2,cookie:eeff0c" $ovs_dir/stdout || return 1
+
+ return 0
+}
+
# drop_reason test
# - drop packets and verify the right drop reason is reported
test_drop_reason() {
@@ -599,7 +739,8 @@ test_upcall_interfaces() {
ovs_add_netns_and_veths "test_upcall_interfaces" ui0 upc left0 l0 \
172.31.110.1/24 -u || return 1
- sleep 1
+ ovs_wait grep -q "listening on upcall packet handler" ${ovs_dir}/left0.out
+
info "sending arping"
ip netns exec upc arping -I l0 172.31.110.20 -c 1 \
>$ovs_dir/arping.stdout 2>$ovs_dir/arping.stderr
@@ -613,16 +754,20 @@ run_test() {
tname="$1"
tdesc="$2"
- if ! lsmod | grep openvswitch >/dev/null 2>&1; then
- stdbuf -o0 printf "TEST: %-60s [NOMOD]\n" "${tdesc}"
- return $ksft_skip
- fi
-
if python3 ovs-dpctl.py -h 2>&1 | \
grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then
stdbuf -o0 printf "TEST: %-60s [PYLIB]\n" "${tdesc}"
return $ksft_skip
fi
+
+ python3 ovs-dpctl.py show >/dev/null 2>&1 || \
+ echo "[DPCTL] show exception."
+
+ if ! lsmod | grep openvswitch >/dev/null 2>&1; then
+ stdbuf -o0 printf "TEST: %-60s [NOMOD]\n" "${tdesc}"
+ return $ksft_skip
+ fi
+
printf "TEST: %-60s [START]\n" "${tname}"
unset IFS
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index 9f8dec2f6539..8a0396bfaf99 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -8,8 +8,10 @@ import argparse
import errno
import ipaddress
import logging
+import math
import multiprocessing
import re
+import socket
import struct
import sys
import time
@@ -26,13 +28,16 @@ try:
from pyroute2.netlink import genlmsg
from pyroute2.netlink import nla
from pyroute2.netlink import nlmsg_atoms
+ from pyroute2.netlink.event import EventSocket
from pyroute2.netlink.exceptions import NetlinkError
from pyroute2.netlink.generic import GenericNetlinkSocket
+ from pyroute2.netlink.nlsocket import Marshal
import pyroute2
+ import pyroute2.iproute
except ModuleNotFoundError:
print("Need to install the python pyroute2 package >= 0.6.")
- sys.exit(0)
+ sys.exit(1)
OVS_DATAPATH_FAMILY = "ovs_datapath"
@@ -58,6 +63,7 @@ OVS_FLOW_CMD_DEL = 2
OVS_FLOW_CMD_GET = 3
OVS_FLOW_CMD_SET = 4
+UINT32_MAX = 0xFFFFFFFF
def macstr(mac):
outstr = ":".join(["%02X" % i for i in mac])
@@ -198,6 +204,18 @@ def convert_ipv4(data):
return int(ipaddress.IPv4Address(ip)), int(ipaddress.IPv4Address(mask))
+def convert_ipv6(data):
+ ip, _, mask = data.partition('/')
+
+ if not ip:
+ ip = mask = 0
+ elif not mask:
+ mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
+ elif mask.isdigit():
+ mask = ipaddress.IPv6Network("::/" + mask).hostmask
+
+ return ipaddress.IPv6Address(ip).packed, ipaddress.IPv6Address(mask).packed
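+# convert_ipv6 example: convert_ipv6("2001:db8::1") returns the packed address
+# together with a packed all-ones mask (no "/mask" given means match exactly).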
+
def convert_int(size):
def convert_int_sized(data):
value, _, mask = data.partition('/')
@@ -267,6 +285,75 @@ def parse_extract_field(
return str_skipped, data
+def parse_attrs(actstr, attr_desc):
+ """Parses the given action string and returns a list of netlink
+ attributes based on a list of attribute descriptions.
+
+ Each element in the attribute description list is a tuple such as:
+ (name, attr_name, parse_func)
+ where:
+ name: is the string representing the attribute
+ attr_name: is the name of the attribute as defined in the uAPI.
+ parse_func: is a callable accepting a string and returning either
+ a single object (the parsed attribute value) or a tuple of
+ two values (the parsed attribute value and the remaining string)
+
+ Returns a list of attributes and the remaining string.
+ """
+ def parse_attr(actstr, key, func):
+ actstr = actstr[len(key) :]
+
+ if not func:
+ return None, actstr
+
+ delim = actstr[0]
+ actstr = actstr[1:]
+
+ if delim == "=":
+ pos = strcspn(actstr, ",)")
+ ret = func(actstr[:pos])
+ else:
+ ret = func(actstr)
+
+ if isinstance(ret, tuple):
+ (datum, actstr) = ret
+ else:
+ datum = ret
+ actstr = actstr[strcspn(actstr, ",)"):]
+
+ if delim == "(":
+ if not actstr or actstr[0] != ")":
+ raise ValueError("Action contains unbalanced parentheses")
+
+ actstr = actstr[1:]
+
+ actstr = actstr[strspn(actstr, ", ") :]
+
+ return datum, actstr
+
+ attrs = []
+ attr_desc = list(attr_desc)
+ while actstr and actstr[0] != ")" and attr_desc:
+ found = False
+ for i, (key, attr, func) in enumerate(attr_desc):
+ if actstr.startswith(key):
+ datum, actstr = parse_attr(actstr, key, func)
+ attrs.append([attr, datum])
+ found = True
+ del attr_desc[i]
+
+ if not found:
+ raise ValueError("Unknown attribute: '%s'" % actstr)
+
+ actstr = actstr[strspn(actstr, ", ") :]
+
+ if actstr[0] != ")":
+ raise ValueError("Action string contains extra garbage or has "
+ "unbalanced parenthesis: '%s'" % actstr)
+
+ return attrs, actstr[1:]
+
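+# parse_attrs() example, using the attribute description that the psample
+# action defines further below:
+#   desc = (("group", "OVS_PSAMPLE_ATTR_GROUP", int),
+#           ("cookie", "OVS_PSAMPLE_ATTR_COOKIE",
+#            lambda x: list(bytearray.fromhex(x))))
+#   parse_attrs("group=1,cookie=c0ffee)", desc)
+#   -> ([['OVS_PSAMPLE_ATTR_GROUP', 1],
+#        ['OVS_PSAMPLE_ATTR_COOKIE', [0xc0, 0xff, 0xee]]], '')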
+
class ovs_dp_msg(genlmsg):
# include the OVS version
# We need a custom header rather than just being able to rely on
@@ -282,15 +369,15 @@ class ovsactions(nla):
("OVS_ACTION_ATTR_UNSPEC", "none"),
("OVS_ACTION_ATTR_OUTPUT", "uint32"),
("OVS_ACTION_ATTR_USERSPACE", "userspace"),
- ("OVS_ACTION_ATTR_SET", "none"),
+ ("OVS_ACTION_ATTR_SET", "ovskey"),
("OVS_ACTION_ATTR_PUSH_VLAN", "none"),
("OVS_ACTION_ATTR_POP_VLAN", "flag"),
- ("OVS_ACTION_ATTR_SAMPLE", "none"),
+ ("OVS_ACTION_ATTR_SAMPLE", "sample"),
("OVS_ACTION_ATTR_RECIRC", "uint32"),
("OVS_ACTION_ATTR_HASH", "none"),
("OVS_ACTION_ATTR_PUSH_MPLS", "none"),
("OVS_ACTION_ATTR_POP_MPLS", "flag"),
- ("OVS_ACTION_ATTR_SET_MASKED", "none"),
+ ("OVS_ACTION_ATTR_SET_MASKED", "ovskey"),
("OVS_ACTION_ATTR_CT", "ctact"),
("OVS_ACTION_ATTR_TRUNC", "uint32"),
("OVS_ACTION_ATTR_PUSH_ETH", "none"),
@@ -304,8 +391,85 @@ class ovsactions(nla):
("OVS_ACTION_ATTR_ADD_MPLS", "none"),
("OVS_ACTION_ATTR_DEC_TTL", "none"),
("OVS_ACTION_ATTR_DROP", "uint32"),
+ ("OVS_ACTION_ATTR_PSAMPLE", "psample"),
)
+ class psample(nla):
+ nla_flags = NLA_F_NESTED
+
+ nla_map = (
+ ("OVS_PSAMPLE_ATTR_UNSPEC", "none"),
+ ("OVS_PSAMPLE_ATTR_GROUP", "uint32"),
+ ("OVS_PSAMPLE_ATTR_COOKIE", "array(uint8)"),
+ )
+
+ def dpstr(self, more=False):
+ args = "group=%d" % self.get_attr("OVS_PSAMPLE_ATTR_GROUP")
+
+ cookie = self.get_attr("OVS_PSAMPLE_ATTR_COOKIE")
+ if cookie:
+ args += ",cookie(%s)" % \
+ "".join(format(x, "02x") for x in cookie)
+
+ return "psample(%s)" % args
+
+ def parse(self, actstr):
+ desc = (
+ ("group", "OVS_PSAMPLE_ATTR_GROUP", int),
+ ("cookie", "OVS_PSAMPLE_ATTR_COOKIE",
+ lambda x: list(bytearray.fromhex(x)))
+ )
+
+ attrs, actstr = parse_attrs(actstr, desc)
+
+ for attr in attrs:
+ self["attrs"].append(attr)
+
+ return actstr
+
+ class sample(nla):
+ nla_flags = NLA_F_NESTED
+
+ nla_map = (
+ ("OVS_SAMPLE_ATTR_UNSPEC", "none"),
+ ("OVS_SAMPLE_ATTR_PROBABILITY", "uint32"),
+ ("OVS_SAMPLE_ATTR_ACTIONS", "ovsactions"),
+ )
+
+ def dpstr(self, more=False):
+ args = []
+
+ args.append("sample={:.2f}%".format(
+ 100 * self.get_attr("OVS_SAMPLE_ATTR_PROBABILITY") /
+ UINT32_MAX))
+
+ actions = self.get_attr("OVS_SAMPLE_ATTR_ACTIONS")
+ if actions:
+ args.append("actions(%s)" % actions.dpstr(more))
+
+ return "sample(%s)" % ",".join(args)
+
+ def parse(self, actstr):
+ def parse_nested_actions(actstr):
+ subacts = ovsactions()
+ parsed_len = subacts.parse(actstr)
+ return subacts, actstr[parsed_len :]
+
+ def percent_to_rate(percent):
+ percent = float(percent.strip('%'))
+ return int(math.floor(UINT32_MAX * (percent / 100.0) + .5))
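+                # e.g. percent_to_rate("100%") == UINT32_MAX,
+                #      percent_to_rate("50%") == 0x80000000 (rounded)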
+
+ desc = (
+ ("sample", "OVS_SAMPLE_ATTR_PROBABILITY", percent_to_rate),
+ ("actions", "OVS_SAMPLE_ATTR_ACTIONS", parse_nested_actions),
+ )
+ attrs, actstr = parse_attrs(actstr, desc)
+
+ for attr in attrs:
+ self["attrs"].append(attr)
+
+ return actstr
+
class ctact(nla):
nla_flags = NLA_F_NESTED
@@ -427,50 +591,77 @@ class ovsactions(nla):
print_str += "userdata="
for f in self.get_attr("OVS_USERSPACE_ATTR_USERDATA"):
print_str += "%x." % f
- if self.get_attr("OVS_USERSPACE_ATTR_TUN_PORT") is not None:
+ if self.get_attr("OVS_USERSPACE_ATTR_EGRESS_TUN_PORT") is not None:
print_str += "egress_tun_port=%d" % self.get_attr(
- "OVS_USERSPACE_ATTR_TUN_PORT"
+ "OVS_USERSPACE_ATTR_EGRESS_TUN_PORT"
)
print_str += ")"
return print_str
+ def parse(self, actstr):
+ attrs_desc = (
+ ("pid", "OVS_USERSPACE_ATTR_PID", int),
+ ("userdata", "OVS_USERSPACE_ATTR_USERDATA",
+ lambda x: list(bytearray.fromhex(x))),
+ ("egress_tun_port", "OVS_USERSPACE_ATTR_EGRESS_TUN_PORT", int)
+ )
+
+ attrs, actstr = parse_attrs(actstr, attrs_desc)
+ for attr in attrs:
+ self["attrs"].append(attr)
+
+ return actstr
+
def dpstr(self, more=False):
print_str = ""
- for field in self.nla_map:
+ for field in self["attrs"]:
if field[1] == "none" or self.get_attr(field[0]) is None:
continue
if print_str != "":
print_str += ","
- if field[1] == "uint32":
- if field[0] == "OVS_ACTION_ATTR_OUTPUT":
- print_str += "%d" % int(self.get_attr(field[0]))
- elif field[0] == "OVS_ACTION_ATTR_RECIRC":
- print_str += "recirc(0x%x)" % int(self.get_attr(field[0]))
- elif field[0] == "OVS_ACTION_ATTR_TRUNC":
- print_str += "trunc(%d)" % int(self.get_attr(field[0]))
- elif field[0] == "OVS_ACTION_ATTR_DROP":
- print_str += "drop(%d)" % int(self.get_attr(field[0]))
- elif field[1] == "flag":
- if field[0] == "OVS_ACTION_ATTR_CT_CLEAR":
- print_str += "ct_clear"
- elif field[0] == "OVS_ACTION_ATTR_POP_VLAN":
- print_str += "pop_vlan"
- elif field[0] == "OVS_ACTION_ATTR_POP_ETH":
- print_str += "pop_eth"
- elif field[0] == "OVS_ACTION_ATTR_POP_NSH":
- print_str += "pop_nsh"
- elif field[0] == "OVS_ACTION_ATTR_POP_MPLS":
- print_str += "pop_mpls"
+ if field[0] == "OVS_ACTION_ATTR_OUTPUT":
+ print_str += "%d" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_RECIRC":
+ print_str += "recirc(0x%x)" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_TRUNC":
+ print_str += "trunc(%d)" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_DROP":
+ print_str += "drop(%d)" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_CT_CLEAR":
+ print_str += "ct_clear"
+ elif field[0] == "OVS_ACTION_ATTR_POP_VLAN":
+ print_str += "pop_vlan"
+ elif field[0] == "OVS_ACTION_ATTR_POP_ETH":
+ print_str += "pop_eth"
+ elif field[0] == "OVS_ACTION_ATTR_POP_NSH":
+ print_str += "pop_nsh"
+ elif field[0] == "OVS_ACTION_ATTR_POP_MPLS":
+ print_str += "pop_mpls"
else:
datum = self.get_attr(field[0])
if field[0] == "OVS_ACTION_ATTR_CLONE":
print_str += "clone("
print_str += datum.dpstr(more)
print_str += ")"
+ elif field[0] == "OVS_ACTION_ATTR_SET" or \
+ field[0] == "OVS_ACTION_ATTR_SET_MASKED":
+ print_str += "set"
+ field = datum
+ mask = None
+ if field[0] == "OVS_ACTION_ATTR_SET_MASKED":
+ print_str += "_masked"
+ field = datum[0]
+ mask = datum[1]
+ print_str += "("
+ print_str += field.dpstr(mask, more)
+ print_str += ")"
else:
- print_str += datum.dpstr(more)
+ try:
+ print_str += datum.dpstr(more)
+ except:
+ print_str += "{ATTR: %s not decoded}" % field[0]
return print_str
@@ -544,6 +735,25 @@ class ovsactions(nla):
self["attrs"].append(("OVS_ACTION_ATTR_CLONE", subacts))
actstr = actstr[parsedLen:]
parsed = True
+ elif parse_starts_block(actstr, "set(", False):
+ parencount += 1
+ k = ovskey()
+ actstr = actstr[len("set("):]
+ actstr = k.parse(actstr, None)
+ self["attrs"].append(("OVS_ACTION_ATTR_SET", k))
+ if not actstr.startswith(")"):
+ actstr = ")" + actstr
+ parsed = True
+ elif parse_starts_block(actstr, "set_masked(", False):
+ parencount += 1
+ k = ovskey()
+ m = ovskey()
+ actstr = actstr[len("set_masked("):]
+ actstr = k.parse(actstr, m)
+ self["attrs"].append(("OVS_ACTION_ATTR_SET_MASKED", [k, m]))
+ if not actstr.startswith(")"):
+ actstr = ")" + actstr
+ parsed = True
elif parse_starts_block(actstr, "ct(", False):
parencount += 1
actstr = actstr[len("ct(") :]
@@ -637,6 +847,37 @@ class ovsactions(nla):
self["attrs"].append(["OVS_ACTION_ATTR_CT", ctact])
parsed = True
+ elif parse_starts_block(actstr, "sample(", False):
+ sampleact = self.sample()
+ actstr = sampleact.parse(actstr[len("sample(") : ])
+ self["attrs"].append(["OVS_ACTION_ATTR_SAMPLE", sampleact])
+ parsed = True
+
+ elif parse_starts_block(actstr, "psample(", False):
+ psampleact = self.psample()
+ actstr = psampleact.parse(actstr[len("psample(") : ])
+ self["attrs"].append(["OVS_ACTION_ATTR_PSAMPLE", psampleact])
+ parsed = True
+
+ elif parse_starts_block(actstr, "userspace(", False):
+ uact = self.userspace()
+ actstr = uact.parse(actstr[len("userspace(") : ])
+ self["attrs"].append(["OVS_ACTION_ATTR_USERSPACE", uact])
+ parsed = True
+
+ elif parse_starts_block(actstr, "trunc(", False):
+ parencount += 1
+ actstr, val = parse_extract_field(
+ actstr,
+ "trunc(",
+ r"([0-9]+)",
+ int,
+ False,
+ None,
+ )
+ self["attrs"].append(["OVS_ACTION_ATTR_TRUNC", val])
+ parsed = True
+
actstr = actstr[strspn(actstr, ", ") :]
while parencount > 0:
parencount -= 1
@@ -675,7 +916,7 @@ class ovskey(nla):
("OVS_KEY_ATTR_ARP", "ovs_key_arp"),
("OVS_KEY_ATTR_ND", "ovs_key_nd"),
("OVS_KEY_ATTR_SKB_MARK", "uint32"),
- ("OVS_KEY_ATTR_TUNNEL", "none"),
+ ("OVS_KEY_ATTR_TUNNEL", "ovs_key_tunnel"),
("OVS_KEY_ATTR_SCTP", "ovs_key_sctp"),
("OVS_KEY_ATTR_TCP_FLAGS", "be16"),
("OVS_KEY_ATTR_DP_HASH", "uint32"),
@@ -907,21 +1148,21 @@ class ovskey(nla):
"src",
"src",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big"),
- lambda x: ipaddress.IPv6Address(x),
+ lambda x: ipaddress.IPv6Address(x).packed if x else 0,
+ convert_ipv6,
),
(
"dst",
"dst",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big"),
- lambda x: ipaddress.IPv6Address(x),
+ lambda x: ipaddress.IPv6Address(x).packed if x else 0,
+ convert_ipv6,
),
- ("label", "label", "%d", int),
- ("proto", "proto", "%d", int),
- ("tclass", "tclass", "%d", int),
- ("hlimit", "hlimit", "%d", int),
- ("frag", "frag", "%d", int),
+ ("label", "label", "%d", lambda x: int(x) if x else 0),
+ ("proto", "proto", "%d", lambda x: int(x) if x else 0),
+ ("tclass", "tclass", "%d", lambda x: int(x) if x else 0),
+ ("hlimit", "hlimit", "%d", lambda x: int(x) if x else 0),
+ ("frag", "frag", "%d", lambda x: int(x) if x else 0),
)
def __init__(
@@ -1119,7 +1360,7 @@ class ovskey(nla):
"target",
"target",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big"),
+ convert_ipv6,
),
("sll", "sll", macstr, lambda x: int.from_bytes(x, "big")),
("tll", "tll", macstr, lambda x: int.from_bytes(x, "big")),
@@ -1204,13 +1445,13 @@ class ovskey(nla):
"src",
"src",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big", convertmac),
+ convert_ipv6,
),
(
"dst",
"dst",
lambda x: str(ipaddress.IPv6Address(x)),
- lambda x: int.from_bytes(x, "big"),
+ convert_ipv6,
),
("tp_src", "tp_src", "%d", int),
("tp_dst", "tp_dst", "%d", int),
@@ -1235,6 +1476,163 @@ class ovskey(nla):
init=init,
)
+ class ovs_key_tunnel(nla):
+ nla_flags = NLA_F_NESTED
+
+ nla_map = (
+ ("OVS_TUNNEL_KEY_ATTR_ID", "be64"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV4_SRC", "ipaddr"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV4_DST", "ipaddr"),
+ ("OVS_TUNNEL_KEY_ATTR_TOS", "uint8"),
+ ("OVS_TUNNEL_KEY_ATTR_TTL", "uint8"),
+ ("OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT", "flag"),
+ ("OVS_TUNNEL_KEY_ATTR_CSUM", "flag"),
+ ("OVS_TUNNEL_KEY_ATTR_OAM", "flag"),
+ ("OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS", "array(uint32)"),
+ ("OVS_TUNNEL_KEY_ATTR_TP_SRC", "be16"),
+ ("OVS_TUNNEL_KEY_ATTR_TP_DST", "be16"),
+ ("OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS", "none"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV6_SRC", "ipaddr"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV6_DST", "ipaddr"),
+ ("OVS_TUNNEL_KEY_ATTR_PAD", "none"),
+ ("OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS", "none"),
+ ("OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE", "flag"),
+ )
+
+ def parse(self, flowstr, mask=None):
+ if not flowstr.startswith("tunnel("):
+ return None, None
+
+ k = ovskey.ovs_key_tunnel()
+ if mask is not None:
+ mask = ovskey.ovs_key_tunnel()
+
+ flowstr = flowstr[len("tunnel("):]
+
+ v6_address = None
+
+ fields = [
+ ("tun_id=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_ID",
+ 0xffffffffffffffff, None, None),
+
+ ("src=", r"([0-9a-fA-F\.]+)", str,
+ "OVS_TUNNEL_KEY_ATTR_IPV4_SRC", "255.255.255.255", "0.0.0.0",
+ False),
+ ("dst=", r"([0-9a-fA-F\.]+)", str,
+ "OVS_TUNNEL_KEY_ATTR_IPV4_DST", "255.255.255.255", "0.0.0.0",
+ False),
+
+ ("ipv6_src=", r"([0-9a-fA-F:]+)", str,
+ "OVS_TUNNEL_KEY_ATTR_IPV6_SRC",
+ "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "::", True),
+ ("ipv6_dst=", r"([0-9a-fA-F:]+)", str,
+ "OVS_TUNNEL_KEY_ATTR_IPV6_DST",
+ "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "::", True),
+
+ ("tos=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TOS", 255, 0,
+ None),
+ ("ttl=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TTL", 255, 0,
+ None),
+
+ ("tp_src=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TP_SRC",
+ 65535, 0, None),
+ ("tp_dst=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TP_DST",
+ 65535, 0, None),
+ ]
+
+ forced_include = ["OVS_TUNNEL_KEY_ATTR_TTL"]
+
+ for prefix, regex, typ, attr_name, mask_val, default_val, v46_flag in fields:
+ flowstr, value = parse_extract_field(flowstr, prefix, regex, typ, False)
+ if not attr_name:
+ raise Exception("Bad list value in tunnel fields")
+
+ if value is None and attr_name in forced_include:
+ value = default_val
+ mask_val = default_val
+
+ if value is not None:
+ if v46_flag is not None:
+ if v6_address is None:
+ v6_address = v46_flag
+ if v46_flag != v6_address:
+ raise ValueError("Cannot mix v6 and v4 addresses")
+ k["attrs"].append([attr_name, value])
+ if mask is not None:
+ mask["attrs"].append([attr_name, mask_val])
+ else:
+ if v46_flag is not None:
+ if v6_address is None or v46_flag != v6_address:
+ continue
+ if mask is not None:
+ mask["attrs"].append([attr_name, default_val])
+
+ if k["attrs"][0][0] != "OVS_TUNNEL_KEY_ATTR_ID":
+ raise ValueError("Needs a tunid set")
+
+ if flowstr.startswith("flags("):
+ flowstr = flowstr[len("flags("):]
+ flagspos = flowstr.find(")")
+ flags = flowstr[:flagspos]
+ flowstr = flowstr[flagspos + 1:]
+
+ flag_attrs = {
+ "df": "OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT",
+ "csum": "OVS_TUNNEL_KEY_ATTR_CSUM",
+ "oam": "OVS_TUNNEL_KEY_ATTR_OAM"
+ }
+
+ for flag in flags.split("|"):
+ if flag in flag_attrs:
+ k["attrs"].append([flag_attrs[flag], True])
+ if mask is not None:
+ mask["attrs"].append([flag_attrs[flag], True])
+
+ flowstr = flowstr[strspn(flowstr, ", ") :]
+ return flowstr, k, mask
+
+ def dpstr(self, mask=None, more=False):
+ print_str = "tunnel("
+
+ flagsattrs = []
+ for k in self["attrs"]:
+ noprint = False
+ if k[0] == "OVS_TUNNEL_KEY_ATTR_ID":
+ print_str += "tun_id=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV4_SRC":
+ print_str += "src=%s" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV4_DST":
+ print_str += "dst=%s" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV6_SRC":
+ print_str += "ipv6_src=%s" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV6_DST":
+ print_str += "ipv6_dst=%s" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_TOS":
+ print_str += "tos=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_TTL":
+ print_str += "ttl=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_TP_SRC":
+ print_str += "tp_src=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_TP_DST":
+ print_str += "tp_dst=%d" % k[1]
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT":
+ noprint = True
+ flagsattrs.append("df")
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_CSUM":
+ noprint = True
+ flagsattrs.append("csum")
+ elif k[0] == "OVS_TUNNEL_KEY_ATTR_OAM":
+ noprint = True
+ flagsattrs.append("oam")
+
+ if not noprint:
+ print_str += ","
+
+ if len(flagsattrs):
+ print_str += "flags(" + "|".join(flagsattrs) + ")"
+ print_str += ")"
+ return print_str
+
class ovs_key_mpls(nla):
fields = (("lse", ">I"),)
@@ -1243,6 +1641,7 @@ class ovskey(nla):
("OVS_KEY_ATTR_PRIORITY", "skb_priority", intparse),
("OVS_KEY_ATTR_SKB_MARK", "skb_mark", intparse),
("OVS_KEY_ATTR_RECIRC_ID", "recirc_id", intparse),
+ ("OVS_KEY_ATTR_TUNNEL", "tunnel", ovskey.ovs_key_tunnel),
("OVS_KEY_ATTR_DP_HASH", "dp_hash", intparse),
("OVS_KEY_ATTR_CT_STATE", "ct_state", parse_ct_state),
("OVS_KEY_ATTR_CT_ZONE", "ct_zone", intparse),
@@ -1309,7 +1708,7 @@ class ovskey(nla):
mask["attrs"].append([field[0], m])
self["attrs"].append([field[0], k])
- flowstr = flowstr[strspn(flowstr, "),") :]
+ flowstr = flowstr[strspn(flowstr, "), ") :]
return flowstr
@@ -1346,6 +1745,13 @@ class ovskey(nla):
True,
),
(
+ "OVS_KEY_ATTR_TUNNEL",
+ "tunnel",
+ None,
+ False,
+ False,
+ ),
+ (
"OVS_KEY_ATTR_CT_STATE",
"ct_state",
"0x%04x",
@@ -1617,7 +2023,7 @@ class OvsVport(GenericNetlinkSocket):
("OVS_VPORT_ATTR_PORT_NO", "uint32"),
("OVS_VPORT_ATTR_TYPE", "uint32"),
("OVS_VPORT_ATTR_NAME", "asciiz"),
- ("OVS_VPORT_ATTR_OPTIONS", "none"),
+ ("OVS_VPORT_ATTR_OPTIONS", "vportopts"),
("OVS_VPORT_ATTR_UPCALL_PID", "array(uint32)"),
("OVS_VPORT_ATTR_STATS", "vportstats"),
("OVS_VPORT_ATTR_PAD", "none"),
@@ -1625,6 +2031,13 @@ class OvsVport(GenericNetlinkSocket):
("OVS_VPORT_ATTR_NETNSID", "uint32"),
)
+ class vportopts(nla):
+ nla_map = (
+ ("OVS_TUNNEL_ATTR_UNSPEC", "none"),
+ ("OVS_TUNNEL_ATTR_DST_PORT", "uint16"),
+ ("OVS_TUNNEL_ATTR_EXTENSION", "none"),
+ )
+
class vportstats(nla):
fields = (
("rx_packets", "=Q"),
@@ -1693,7 +2106,7 @@ class OvsVport(GenericNetlinkSocket):
raise ne
return reply
- def attach(self, dpindex, vport_ifname, ptype):
+ def attach(self, dpindex, vport_ifname, ptype, dport, lwt):
msg = OvsVport.ovs_vport_msg()
msg["cmd"] = OVS_VPORT_CMD_NEW
@@ -1702,12 +2115,43 @@ class OvsVport(GenericNetlinkSocket):
msg["dpifindex"] = dpindex
port_type = OvsVport.str_to_type(ptype)
- msg["attrs"].append(["OVS_VPORT_ATTR_TYPE", port_type])
msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_ifname])
msg["attrs"].append(
["OVS_VPORT_ATTR_UPCALL_PID", [self.upcall_packet.epid]]
)
+ TUNNEL_DEFAULTS = [("geneve", 6081),
+ ("vxlan", 4789)]
+
+ for tnl in TUNNEL_DEFAULTS:
+ if ptype == tnl[0]:
+ if not dport:
+ dport = tnl[1]
+
+ if not lwt:
+ vportopt = OvsVport.ovs_vport_msg.vportopts()
+ vportopt["attrs"].append(
+ ["OVS_TUNNEL_ATTR_DST_PORT", socket.htons(dport)]
+ )
+ msg["attrs"].append(
+ ["OVS_VPORT_ATTR_OPTIONS", vportopt]
+ )
+ else:
+ port_type = OvsVport.OVS_VPORT_TYPE_NETDEV
+ ipr = pyroute2.iproute.IPRoute()
+
+ if tnl[0] == "geneve":
+ ipr.link("add", ifname=vport_ifname, kind=tnl[0],
+ geneve_port=dport,
+ geneve_collect_metadata=True,
+ geneve_udp_zero_csum6_rx=1)
+ elif tnl[0] == "vxlan":
+ ipr.link("add", ifname=vport_ifname, kind=tnl[0],
+ vxlan_learning=0, vxlan_collect_metadata=1,
+ vxlan_udp_zero_csum6_rx=1, vxlan_port=dport)
+ break
+ msg["attrs"].append(["OVS_VPORT_ATTR_TYPE", port_type])
+
try:
reply = self.nlm_request(
msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
@@ -2018,10 +2462,71 @@ class OvsFlow(GenericNetlinkSocket):
print("MISS upcall[%d/%s]: %s" % (seq, pktpres, keystr), flush=True)
def execute(self, packetmsg):
- print("userspace execute command")
+ print("userspace execute command", flush=True)
def action(self, packetmsg):
- print("userspace action command")
+ print("userspace action command", flush=True)
+
+
+class psample_sample(genlmsg):
+ nla_map = (
+ ("PSAMPLE_ATTR_IIFINDEX", "none"),
+ ("PSAMPLE_ATTR_OIFINDEX", "none"),
+ ("PSAMPLE_ATTR_ORIGSIZE", "none"),
+ ("PSAMPLE_ATTR_SAMPLE_GROUP", "uint32"),
+ ("PSAMPLE_ATTR_GROUP_SEQ", "none"),
+ ("PSAMPLE_ATTR_SAMPLE_RATE", "uint32"),
+ ("PSAMPLE_ATTR_DATA", "array(uint8)"),
+ ("PSAMPLE_ATTR_GROUP_REFCOUNT", "none"),
+ ("PSAMPLE_ATTR_TUNNEL", "none"),
+ ("PSAMPLE_ATTR_PAD", "none"),
+ ("PSAMPLE_ATTR_OUT_TC", "none"),
+ ("PSAMPLE_ATTR_OUT_TC_OCC", "none"),
+ ("PSAMPLE_ATTR_LATENCY", "none"),
+ ("PSAMPLE_ATTR_TIMESTAMP", "none"),
+ ("PSAMPLE_ATTR_PROTO", "none"),
+ ("PSAMPLE_ATTR_USER_COOKIE", "array(uint8)"),
+ )
+
+ def dpstr(self):
+ fields = []
+ data = ""
+ for (attr, value) in self["attrs"]:
+ if attr == "PSAMPLE_ATTR_SAMPLE_GROUP":
+ fields.append("group:%d" % value)
+ if attr == "PSAMPLE_ATTR_SAMPLE_RATE":
+ fields.append("rate:%d" % value)
+ if attr == "PSAMPLE_ATTR_USER_COOKIE":
+ value = "".join(format(x, "02x") for x in value)
+ fields.append("cookie:%s" % value)
+ if attr == "PSAMPLE_ATTR_DATA" and len(value) > 0:
+ data = "data:%s" % "".join(format(x, "02x") for x in value)
+
+ return ("%s %s" % (",".join(fields), data)).strip()
+
+
+class psample_msg(Marshal):
+ PSAMPLE_CMD_SAMPLE = 0
+ PSAMPLE_CMD_GET_GROUP = 1
+ PSAMPLE_CMD_NEW_GROUP = 2
+ PSAMPLE_CMD_DEL_GROUP = 3
+ PSAMPLE_CMD_SET_FILTER = 4
+ msg_map = {PSAMPLE_CMD_SAMPLE: psample_sample}
+
+
+class PsampleEvent(EventSocket):
+ genl_family = "psample"
+ mcast_groups = ["packets"]
+ marshal_class = psample_msg
+
+ def read_samples(self):
+ print("listening for psample events", flush=True)
+ while True:
+ try:
+ for msg in self.get():
+ print(msg.dpstr(), flush=True)
+ except NetlinkError as ne:
+ raise ne
def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()):
@@ -2053,12 +2558,19 @@ def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()):
for iface in ndb.interfaces:
rep = vpl.info(iface.ifname, ifindex)
if rep is not None:
+ opts = ""
+ vpo = rep.get_attr("OVS_VPORT_ATTR_OPTIONS")
+ if vpo:
+ dpo = vpo.get_attr("OVS_TUNNEL_ATTR_DST_PORT")
+ if dpo:
+ opts += " tnl-dport:%s" % socket.ntohs(dpo)
print(
- " port %d: %s (%s)"
+ " port %d: %s (%s%s)"
% (
rep.get_attr("OVS_VPORT_ATTR_PORT_NO"),
rep.get_attr("OVS_VPORT_ATTR_NAME"),
OvsVport.type_to_str(rep.get_attr("OVS_VPORT_ATTR_TYPE")),
+ opts,
)
)
@@ -2081,7 +2593,7 @@ def main(argv):
help="Increment 'verbose' output counter.",
default=0,
)
- subparsers = parser.add_subparsers()
+ subparsers = parser.add_subparsers(dest="subcommand")
showdpcmd = subparsers.add_parser("show")
showdpcmd.add_argument(
@@ -2120,12 +2632,30 @@ def main(argv):
"--ptype",
type=str,
default="netdev",
- choices=["netdev", "internal"],
+ choices=["netdev", "internal", "geneve", "vxlan"],
help="Interface type (default netdev)",
)
+ addifcmd.add_argument(
+ "-p",
+ "--dport",
+ type=int,
+ default=0,
+ help="Destination port (0 for default)"
+ )
+ addifcmd.add_argument(
+ "-l",
+ "--lwt",
+ type=bool,
+ default=True,
+ help="Use LWT infrastructure instead of vport (default true)."
+ )
delifcmd = subparsers.add_parser("del-if")
delifcmd.add_argument("dpname", help="Datapath Name")
delifcmd.add_argument("delif", help="Interface name for adding")
+ delifcmd.add_argument("-d",
+ "--dellink",
+ type=bool, default=False,
+ help="Delete the link as well.")
dumpflcmd = subparsers.add_parser("dump-flows")
dumpflcmd.add_argument("dumpdp", help="Datapath Name")
@@ -2138,6 +2668,8 @@ def main(argv):
delfscmd = subparsers.add_parser("del-flows")
delfscmd.add_argument("flsbr", help="Datapath name")
+ subparsers.add_parser("psample-events")
+
args = parser.parse_args()
if args.verbose > 0:
@@ -2152,6 +2684,9 @@ def main(argv):
sys.setrecursionlimit(100000)
+ if args.subcommand == "psample-events":
+ PsampleEvent().read_samples()
+
if hasattr(args, "showdp"):
found = False
for iface in ndb.interfaces:
@@ -2186,7 +2721,8 @@ def main(argv):
print("DP '%s' not found." % args.dpname)
return 1
dpindex = rep["dpifindex"]
- rep = ovsvp.attach(rep["dpifindex"], args.addif, args.ptype)
+ rep = ovsvp.attach(rep["dpifindex"], args.addif, args.ptype,
+ args.dport, args.lwt)
msg = "vport '%s'" % args.addif
if rep and rep["header"]["error"] is None:
msg += " added."
@@ -2207,6 +2743,9 @@ def main(argv):
msg += " removed."
else:
msg += " failed to remove."
+ if args.dellink:
+ ipr = pyroute2.iproute.IPRoute()
+ ipr.link("del", index=ipr.link_lookup(ifname=args.delif)[0])
elif hasattr(args, "dumpdp"):
rep = ovsdp.info(args.dumpdp, 0)
if rep is None:
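The hunks above extend ovs-dpctl.py with tunnel vports (geneve/vxlan, optional destination port, LWT or compat vport mode), a --dellink option for del-if, and a psample-events listener. A minimal command-line sketch, not part of the patch; the datapath and interface names are illustrative and assume the datapath is created with add-dp first:

    # create a datapath, then attach a geneve vport on the default port (6081)
    python3 ovs-dpctl.py add-dp test0
    python3 ovs-dpctl.py add-if test0 geneve0 -t geneve
    # tear it down again, deleting the backing netdev created for the LWT case
    python3 ovs-dpctl.py del-if test0 -d true geneve0
    python3 ovs-dpctl.py del-dp test0
    # in a separate shell: print sampled packets (group, rate, cookie, data)
    python3 ovs-dpctl.py psample-events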
diff --git a/tools/testing/selftests/net/openvswitch/settings b/tools/testing/selftests/net/openvswitch/settings
new file mode 100644
index 000000000000..e2206265f67c
--- /dev/null
+++ b/tools/testing/selftests/net/openvswitch/settings
@@ -0,0 +1 @@
+timeout=900
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index cfc84958025a..5175c0c83a23 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -842,25 +842,97 @@ setup_bridge() {
run_cmd ${ns_a} ip link set veth_A-C master br0
}
+setup_ovs_via_internal_utility() {
+ type="${1}"
+ a_addr="${2}"
+ b_addr="${3}"
+ dport="${4}"
+
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-if ovs_br0 ${type}_a -t ${type} || return 1
+
+ ports=$(python3 ./openvswitch/ovs-dpctl.py show)
+ br0_port=$(echo "$ports" | grep -E "\sovs_br0" | sed -e 's@port @@' | cut -d: -f1 | xargs)
+ type_a_port=$(echo "$ports" | grep ${type}_a | sed -e 's@port @@' | cut -d: -f1 | xargs)
+ veth_a_port=$(echo "$ports" | grep veth_A | sed -e 's@port @@' | cut -d: -f1 | xargs)
+
+ v4_a_tun="${prefix4}.${a_r1}.1"
+ v4_b_tun="${prefix4}.${b_r1}.1"
+
+ v6_a_tun="${prefix6}:${a_r1}::1"
+ v6_b_tun="${prefix6}:${b_r1}::1"
+
+ if [ "${v4_a_tun}" = "${a_addr}" ]; then
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0800),ipv4()" \
+ "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x86dd),ipv6()" \
+ "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0800),ipv4()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x86dd),ipv6()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0806),arp()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0806),arp(sip=${veth4_c_addr},tip=${tunnel4_b_addr})" \
+ "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ else
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0800),ipv4()" \
+ "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x86dd),ipv6()" \
+ "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0800),ipv4()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x86dd),ipv6()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0806),arp()" \
+ "${veth_a_port}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \
+ "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0806),arp(sip=${veth4_c_addr},tip=${tunnel4_b_addr})" \
+ "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}"
+ fi
+}
+
+setup_ovs_via_vswitchd() {
+ type="${1}"
+ b_addr="${2}"
+
+ run_cmd ovs-vsctl add-port ovs_br0 ${type}_a -- \
+ set interface ${type}_a type=${type} \
+ options:remote_ip=${b_addr} options:key=1 options:csum=true || return 1
+}
+
setup_ovs_vxlan_or_geneve() {
type="${1}"
a_addr="${2}"
b_addr="${3}"
+ dport="6081"
if [ "${type}" = "vxlan" ]; then
+ dport="4789"
opts="${opts} ttl 64 dstport 4789"
opts_b="local ${b_addr}"
fi
- run_cmd ovs-vsctl add-port ovs_br0 ${type}_a -- \
- set interface ${type}_a type=${type} \
- options:remote_ip=${b_addr} options:key=1 options:csum=true || return 1
+ setup_ovs_via_internal_utility "${type}" "${a_addr}" "${b_addr}" \
+ "${dport}" || \
+ setup_ovs_via_vswitchd "${type}" "${b_addr}" || return 1
run_cmd ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts} || return 1
run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b
run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b
+ run_cmd ip link set ${type}_a up
run_cmd ${ns_b} ip link set ${type}_b up
}
@@ -880,8 +952,24 @@ setup_ovs_vxlan6() {
setup_ovs_vxlan_or_geneve vxlan ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1
}
+setup_ovs_br_internal() {
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-dp ovs_br0 || \
+ return 1
+}
+
+setup_ovs_br_vswitchd() {
+ run_cmd ovs-vsctl add-br ovs_br0 || return 1
+}
+
+setup_ovs_add_if() {
+ ifname="${1}"
+ run_cmd python3 ./openvswitch/ovs-dpctl.py add-if ovs_br0 \
+ "${ifname}" || \
+ run_cmd ovs-vsctl add-port ovs_br0 "${ifname}"
+}
+
setup_ovs_bridge() {
- run_cmd ovs-vsctl add-br ovs_br0 || return $ksft_skip
+ setup_ovs_br_internal || setup_ovs_br_vswitchd || return $ksft_skip
run_cmd ip link set ovs_br0 up
run_cmd ${ns_c} ip link add veth_C-A type veth peer name veth_A-C
@@ -891,7 +979,7 @@ setup_ovs_bridge() {
run_cmd ${ns_c} ip link set veth_C-A up
run_cmd ${ns_c} ip addr add ${veth4_c_addr}/${veth4_mask} dev veth_C-A
run_cmd ${ns_c} ip addr add ${veth6_c_addr}/${veth6_mask} dev veth_C-A
- run_cmd ovs-vsctl add-port ovs_br0 veth_A-C
+ setup_ovs_add_if veth_A-C
# Move veth_A-R1 to init
run_cmd ${ns_a} ip link set veth_A-R1 netns 1
@@ -922,6 +1010,18 @@ trace() {
sleep 1
}
+cleanup_del_ovs_internal() {
+	# squelch the output of the del-if commands since they can be verbose
+ python3 ./openvswitch/ovs-dpctl.py del-if ovs_br0 -d true vxlan_a >/dev/null 2>&1
+ python3 ./openvswitch/ovs-dpctl.py del-if ovs_br0 -d true geneve_a >/dev/null 2>&1
+ python3 ./openvswitch/ovs-dpctl.py del-dp ovs_br0 >/dev/null 2>&1
+}
+
+cleanup_del_ovs_vswitchd() {
+ ovs-vsctl --if-exists del-port vxlan_a 2>/dev/null
+ ovs-vsctl --if-exists del-br ovs_br0 2>/dev/null
+}
+
cleanup() {
for pid in ${tcpdump_pids}; do
kill ${pid}
@@ -940,10 +1040,10 @@ cleanup() {
cleanup_all_ns
- ip link del veth_A-C 2>/dev/null
- ip link del veth_A-R1 2>/dev/null
- ovs-vsctl --if-exists del-port vxlan_a 2>/dev/null
- ovs-vsctl --if-exists del-br ovs_br0 2>/dev/null
+ ip link del veth_A-C 2>/dev/null
+ ip link del veth_A-R1 2>/dev/null
+ cleanup_del_ovs_internal
+ cleanup_del_ovs_vswitchd
rm -f "$tmpoutfile"
}
@@ -1397,6 +1497,12 @@ test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() {
outer_family=${3}
ll_mtu=4000
+ if [ "${type}" = "vxlan" ]; then
+ tun_a="vxlan_sys_4789"
+ elif [ "${type}" = "geneve" ]; then
+ tun_a="genev_sys_6081"
+ fi
+
if [ ${outer_family} -eq 4 ]; then
setup namespaces routing ovs_bridge ovs_${type}4 || return $ksft_skip
# IPv4 header UDP header VXLAN/GENEVE header Ethernet header
@@ -1407,17 +1513,11 @@ test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() {
exp_mtu=$((${ll_mtu} - 40 - 8 - 8 - 14))
fi
- if [ "${type}" = "vxlan" ]; then
- tun_a="vxlan_sys_4789"
- elif [ "${type}" = "geneve" ]; then
- tun_a="genev_sys_6081"
- fi
-
- trace "" "${tun_a}" "${ns_b}" ${type}_b \
- "" veth_A-R1 "${ns_r1}" veth_R1-A \
- "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B \
- "" ovs_br0 "" veth-A-C \
- "${ns_c}" veth_C-A
+ trace "" ${type}_a "${ns_b}" ${type}_b \
+ "" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B \
+	      "" ovs_br0 "" veth_A-C \
+ "${ns_c}" veth_C-A "" "${tun_a}"
if [ ${family} -eq 4 ]; then
ping=ping
@@ -1436,8 +1536,9 @@ test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() {
mtu "${ns_b}" veth_B-R1 ${ll_mtu}
mtu "${ns_r1}" veth_R1-B ${ll_mtu}
- mtu "" ${tun_a} $((${ll_mtu} + 1000))
- mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
+ mtu "" ${tun_a} $((${ll_mtu} + 1000)) 2>/dev/null || \
+ mtu "" ${type}_a $((${ll_mtu} + 1000)) 2>/dev/null
+ mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
run_cmd ${ns_c} ${ping} -q -M want -i 0.1 -c 20 -s $((${ll_mtu} + 500)) ${dst} || return 1
diff --git a/tools/testing/selftests/net/tcp_ao/Makefile b/tools/testing/selftests/net/tcp_ao/Makefile
index 522d991e310e..bd88b90b902b 100644
--- a/tools/testing/selftests/net/tcp_ao/Makefile
+++ b/tools/testing/selftests/net/tcp_ao/Makefile
@@ -26,7 +26,7 @@ LIB := $(LIBDIR)/libaotst.a
LDLIBS += $(LIB) -pthread
LIBDEPS := lib/aolib.h Makefile
-CFLAGS := -Wall -O2 -g -D_GNU_SOURCE -fno-strict-aliasing
+CFLAGS += -Wall -O2 -g -fno-strict-aliasing
CFLAGS += $(KHDR_INCLUDES)
CFLAGS += -iquote ./lib/ -I ../../../../include/
diff --git a/tools/testing/selftests/net/tcp_ao/self-connect.c b/tools/testing/selftests/net/tcp_ao/self-connect.c
index e154d9e198a9..a5698b0a3718 100644
--- a/tools/testing/selftests/net/tcp_ao/self-connect.c
+++ b/tools/testing/selftests/net/tcp_ao/self-connect.c
@@ -30,8 +30,6 @@ static void setup_lo_intf(const char *lo_intf)
static void tcp_self_connect(const char *tst, unsigned int port,
bool different_keyids, bool check_restore)
{
- uint64_t before_challenge_ack, after_challenge_ack;
- uint64_t before_syn_challenge, after_syn_challenge;
struct tcp_ao_counters before_ao, after_ao;
uint64_t before_aogood, after_aogood;
struct netstat *ns_before, *ns_after;
@@ -62,8 +60,6 @@ static void tcp_self_connect(const char *tst, unsigned int port,
ns_before = netstat_read();
before_aogood = netstat_get(ns_before, "TCPAOGood", NULL);
- before_challenge_ack = netstat_get(ns_before, "TCPChallengeACK", NULL);
- before_syn_challenge = netstat_get(ns_before, "TCPSYNChallenge", NULL);
if (test_get_tcp_ao_counters(sk, &before_ao))
test_error("test_get_tcp_ao_counters()");
@@ -82,8 +78,6 @@ static void tcp_self_connect(const char *tst, unsigned int port,
ns_after = netstat_read();
after_aogood = netstat_get(ns_after, "TCPAOGood", NULL);
- after_challenge_ack = netstat_get(ns_after, "TCPChallengeACK", NULL);
- after_syn_challenge = netstat_get(ns_after, "TCPSYNChallenge", NULL);
if (test_get_tcp_ao_counters(sk, &after_ao))
test_error("test_get_tcp_ao_counters()");
if (!check_restore) {
@@ -98,18 +92,6 @@ static void tcp_self_connect(const char *tst, unsigned int port,
close(sk);
return;
}
- if (after_challenge_ack <= before_challenge_ack ||
- after_syn_challenge <= before_syn_challenge) {
- /*
- * It's also meant to test simultaneous open, so check
- * these counters as well.
- */
- test_fail("%s: Didn't challenge SYN or ACK: %zu <= %zu OR %zu <= %zu",
- tst, after_challenge_ack, before_challenge_ack,
- after_syn_challenge, before_syn_challenge);
- close(sk);
- return;
- }
if (test_tcp_ao_counters_cmp(tst, &before_ao, &after_ao, TEST_CNT_GOOD)) {
close(sk);
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index 85b3baa3f7f3..3e74cfa1a2bf 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -53,6 +53,7 @@ static bool cfg_do_ipv6;
static bool cfg_do_connected;
static bool cfg_do_connectionless;
static bool cfg_do_msgmore;
+static bool cfg_do_recv = true;
static bool cfg_do_setsockopt;
static int cfg_specific_test_id = -1;
@@ -414,6 +415,9 @@ static void run_one(struct testcase *test, int fdt, int fdr,
if (!sent)
return;
+ if (!cfg_do_recv)
+ return;
+
if (test->gso_len)
mss = test->gso_len;
else
@@ -464,8 +468,10 @@ static void run_test(struct sockaddr *addr, socklen_t alen)
if (fdr == -1)
error(1, errno, "socket r");
- if (bind(fdr, addr, alen))
- error(1, errno, "bind");
+ if (cfg_do_recv) {
+ if (bind(fdr, addr, alen))
+ error(1, errno, "bind");
+ }
/* Have tests fail quickly instead of hang */
if (setsockopt(fdr, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
@@ -524,7 +530,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "46cCmst:")) != -1) {
+ while ((c = getopt(argc, argv, "46cCmRst:")) != -1) {
switch (c) {
case '4':
cfg_do_ipv4 = true;
@@ -541,6 +547,9 @@ static void parse_opts(int argc, char **argv)
case 'm':
cfg_do_msgmore = true;
break;
+ case 'R':
+ cfg_do_recv = false;
+ break;
case 's':
cfg_do_setsockopt = true;
break;
diff --git a/tools/testing/selftests/net/udpgso.sh b/tools/testing/selftests/net/udpgso.sh
index 6c63178086b0..85d1fa3c1ff7 100755
--- a/tools/testing/selftests/net/udpgso.sh
+++ b/tools/testing/selftests/net/udpgso.sh
@@ -27,6 +27,31 @@ test_route_mtu() {
ip route add local fd00::1/128 table local dev lo mtu 1500
}
+setup_dummy_sink() {
+ ip link add name sink mtu 1500 type dummy
+ ip addr add dev sink 10.0.0.0/24
+ ip addr add dev sink fd00::2/64 nodad
+ ip link set dev sink up
+}
+
+test_hw_gso_hw_csum() {
+ setup_dummy_sink
+ ethtool -K sink tx-checksum-ip-generic on >/dev/null
+ ethtool -K sink tx-udp-segmentation on >/dev/null
+}
+
+test_sw_gso_hw_csum() {
+ setup_dummy_sink
+ ethtool -K sink tx-checksum-ip-generic on >/dev/null
+ ethtool -K sink tx-udp-segmentation off >/dev/null
+}
+
+test_sw_gso_sw_csum() {
+ setup_dummy_sink
+ ethtool -K sink tx-checksum-ip-generic off >/dev/null
+ ethtool -K sink tx-udp-segmentation off >/dev/null
+}
+
if [ "$#" -gt 0 ]; then
"$1"
shift 2 # pop "test_*" arg and "--" delimiter
@@ -56,3 +81,21 @@ echo "ipv4 msg_more"
echo "ipv6 msg_more"
./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C -m
+
+echo "ipv4 hw-gso hw-csum"
+./in_netns.sh "$0" test_hw_gso_hw_csum -- ./udpgso -4 -C -R
+
+echo "ipv6 hw-gso hw-csum"
+./in_netns.sh "$0" test_hw_gso_hw_csum -- ./udpgso -6 -C -R
+
+echo "ipv4 sw-gso hw-csum"
+./in_netns.sh "$0" test_sw_gso_hw_csum -- ./udpgso -4 -C -R
+
+echo "ipv6 sw-gso hw-csum"
+./in_netns.sh "$0" test_sw_gso_hw_csum -- ./udpgso -6 -C -R
+
+echo "ipv4 sw-gso sw-csum"
+./in_netns.sh "$0" test_sw_gso_sw_csum -- ./udpgso -4 -C -R
+
+echo "ipv6 sw-gso sw-csum"
+./in_netns.sh "$0" test_sw_gso_sw_csum -- ./udpgso -6 -C -R
diff --git a/tools/testing/selftests/net/vrf_route_leaking.sh b/tools/testing/selftests/net/vrf_route_leaking.sh
index 2da32f4c479b..152171fb1fc8 100755
--- a/tools/testing/selftests/net/vrf_route_leaking.sh
+++ b/tools/testing/selftests/net/vrf_route_leaking.sh
@@ -59,6 +59,7 @@
# while it is forwarded between different vrfs.
source lib.sh
+PATH=$PWD:$PWD/tools/testing/selftests/net:$PATH
VERBOSE=0
PAUSE_ON_FAIL=no
DEFAULT_TTYPE=sym
@@ -533,6 +534,86 @@ ipv6_ping_frag_asym()
ipv6_ping_frag asym
}
+ipv4_ping_local()
+{
+ log_section "IPv4 (sym route): VRF ICMP local error route lookup ping"
+
+ setup_sym
+
+ check_connectivity || return
+
+ run_cmd ip netns exec $r1 ip vrf exec blue ping -c1 -w1 ${H2_N2_IP}
+ log_test $? 0 "VRF ICMP local IPv4"
+}
+
+ipv4_tcp_local()
+{
+ log_section "IPv4 (sym route): VRF tcp local connection"
+
+ setup_sym
+
+ check_connectivity || return
+
+ run_cmd nettest -s -O "$h2" -l ${H2_N2_IP} -I eth0 -3 eth0 &
+ sleep 1
+ run_cmd nettest -N "$r1" -d blue -r ${H2_N2_IP}
+ log_test $? 0 "VRF tcp local connection IPv4"
+}
+
+ipv4_udp_local()
+{
+ log_section "IPv4 (sym route): VRF udp local connection"
+
+ setup_sym
+
+ check_connectivity || return
+
+ run_cmd nettest -s -D -O "$h2" -l ${H2_N2_IP} -I eth0 -3 eth0 &
+ sleep 1
+ run_cmd nettest -D -N "$r1" -d blue -r ${H2_N2_IP}
+ log_test $? 0 "VRF udp local connection IPv4"
+}
+
+ipv6_ping_local()
+{
+ log_section "IPv6 (sym route): VRF ICMP local error route lookup ping"
+
+ setup_sym
+
+ check_connectivity6 || return
+
+ run_cmd ip netns exec $r1 ip vrf exec blue ${ping6} -c1 -w1 ${H2_N2_IP6}
+ log_test $? 0 "VRF ICMP local IPv6"
+}
+
+ipv6_tcp_local()
+{
+ log_section "IPv6 (sym route): VRF tcp local connection"
+
+ setup_sym
+
+ check_connectivity6 || return
+
+ run_cmd nettest -s -6 -O "$h2" -l ${H2_N2_IP6} -I eth0 -3 eth0 &
+ sleep 1
+ run_cmd nettest -6 -N "$r1" -d blue -r ${H2_N2_IP6}
+ log_test $? 0 "VRF tcp local connection IPv6"
+}
+
+ipv6_udp_local()
+{
+ log_section "IPv6 (sym route): VRF udp local connection"
+
+ setup_sym
+
+ check_connectivity6 || return
+
+ run_cmd nettest -s -6 -D -O "$h2" -l ${H2_N2_IP6} -I eth0 -3 eth0 &
+ sleep 1
+ run_cmd nettest -6 -D -N "$r1" -d blue -r ${H2_N2_IP6}
+ log_test $? 0 "VRF udp local connection IPv6"
+}
+
################################################################################
# usage
@@ -555,8 +636,10 @@ EOF
# Some systems don't have a ping6 binary anymore
command -v ping6 > /dev/null 2>&1 && ping6=$(command -v ping6) || ping6=$(command -v ping)
-TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_ttl_asym ipv4_traceroute_asym"
-TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_ttl_asym ipv6_traceroute_asym"
+TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_local ipv4_tcp_local
+ipv4_udp_local ipv4_ping_ttl_asym ipv4_traceroute_asym"
+TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_local ipv6_tcp_local ipv6_udp_local
+ipv6_ping_ttl_asym ipv6_traceroute_asym"
ret=0
nsuccess=0
@@ -594,12 +677,18 @@ do
ipv4_traceroute|traceroute) ipv4_traceroute;;&
ipv4_traceroute_asym|traceroute) ipv4_traceroute_asym;;&
ipv4_ping_frag|ping) ipv4_ping_frag;;&
+ ipv4_ping_local|ping) ipv4_ping_local;;&
+ ipv4_tcp_local) ipv4_tcp_local;;&
+ ipv4_udp_local) ipv4_udp_local;;&
ipv6_ping_ttl|ping) ipv6_ping_ttl;;&
ipv6_ping_ttl_asym|ping) ipv6_ping_ttl_asym;;&
ipv6_traceroute|traceroute) ipv6_traceroute;;&
ipv6_traceroute_asym|traceroute) ipv6_traceroute_asym;;&
ipv6_ping_frag|ping) ipv6_ping_frag;;&
+ ipv6_ping_local|ping) ipv6_ping_local;;&
+ ipv6_tcp_local) ipv6_tcp_local;;&
+ ipv6_udp_local) ipv6_udp_local;;&
# setup namespaces and config, but do not run any tests
setup_sym|setup) setup_sym; exit 0;;
diff --git a/tools/testing/selftests/net/ynl.mk b/tools/testing/selftests/net/ynl.mk
new file mode 100644
index 000000000000..59cb26cf3f73
--- /dev/null
+++ b/tools/testing/selftests/net/ynl.mk
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# YNL selftest build snippet
+
+# Inputs:
+#
+# YNL_GENS: families we need in the selftests
+# YNL_PROGS: TEST_PROGS which need YNL (TODO, none exist, yet)
+# YNL_GEN_FILES: TEST_GEN_FILES which need YNL
+
+YNL_OUTPUTS := $(patsubst %,$(OUTPUT)/%,$(YNL_GEN_FILES))
+
+$(YNL_OUTPUTS): $(OUTPUT)/libynl.a
+$(YNL_OUTPUTS): CFLAGS += \
+ -I$(top_srcdir)/usr/include/ $(KHDR_INCLUDES) \
+ -I$(top_srcdir)/tools/net/ynl/lib/ \
+ -I$(top_srcdir)/tools/net/ynl/generated/
+
+$(OUTPUT)/libynl.a:
+ $(Q)$(MAKE) -C $(top_srcdir)/tools/net/ynl GENS="$(YNL_GENS)" libynl.a
+ $(Q)cp $(top_srcdir)/tools/net/ynl/libynl.a $(OUTPUT)/libynl.a
diff --git a/tools/testing/selftests/nolibc/Makefile b/tools/testing/selftests/nolibc/Makefile
index 40dd95228051..3fbabab46958 100644
--- a/tools/testing/selftests/nolibc/Makefile
+++ b/tools/testing/selftests/nolibc/Makefile
@@ -152,7 +152,7 @@ CFLAGS_mips32be = -EB -mabi=32
CFLAGS_STACKPROTECTOR ?= $(call cc-option,-mstack-protector-guard=global $(call cc-option,-fstack-protector-all))
CFLAGS ?= -Os -fno-ident -fno-asynchronous-unwind-tables -std=c89 -W -Wall -Wextra \
$(call cc-option,-fno-stack-protector) \
- $(CFLAGS_$(XARCH)) $(CFLAGS_STACKPROTECTOR)
+ $(CFLAGS_$(XARCH)) $(CFLAGS_STACKPROTECTOR) $(CFLAGS_EXTRA)
LDFLAGS :=
REPORT ?= awk '/\[OK\][\r]*$$/{p++} /\[FAIL\][\r]*$$/{if (!f) printf("\n"); f++; print;} /\[SKIPPED\][\r]*$$/{s++} \
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index 94bb6e11c16f..093d0512f4c5 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -64,6 +64,14 @@ static const char *argv0;
/* will be used by constructor tests */
static int constructor_test_value;
+static const int is_nolibc =
+#ifdef NOLIBC
+ 1
+#else
+ 0
+#endif
+;
+
/* definition of a series of tests */
struct test {
const char *name; /* test name */
@@ -607,7 +615,7 @@ int expect_strne(const char *expr, int llen, const char *cmp)
static __attribute__((unused))
int expect_str_buf_eq(size_t expr, const char *buf, size_t val, int llen, const char *cmp)
{
- llen += printf(" = %lu <%s> ", expr, buf);
+ llen += printf(" = %lu <%s> ", (unsigned long)expr, buf);
if (strcmp(buf, cmp) != 0) {
result(llen, FAIL);
return 1;
@@ -621,6 +629,51 @@ int expect_str_buf_eq(size_t expr, const char *buf, size_t val, int llen, const
return 0;
}
+#define EXPECT_STRTOX(cond, func, input, base, expected, chars, expected_errno) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_strtox(llen, func, input, base, expected, chars, expected_errno); } while (0)
+
+static __attribute__((unused))
+int expect_strtox(int llen, void *func, const char *input, int base, intmax_t expected, int expected_chars, int expected_errno)
+{
+ char *endptr;
+ int actual_errno, actual_chars;
+ intmax_t r;
+
+ errno = 0;
+ if (func == strtol) {
+ r = strtol(input, &endptr, base);
+ } else if (func == strtoul) {
+ r = strtoul(input, &endptr, base);
+ } else {
+ result(llen, FAIL);
+ return 1;
+ }
+ actual_errno = errno;
+ actual_chars = endptr - input;
+
+ llen += printf(" %lld = %lld", (long long)expected, (long long)r);
+ if (r != expected) {
+ result(llen, FAIL);
+ return 1;
+ }
+ if (expected_chars == -1) {
+ if (*endptr != '\0') {
+ result(llen, FAIL);
+ return 1;
+ }
+ } else if (expected_chars != actual_chars) {
+ result(llen, FAIL);
+ return 1;
+ }
+ if (actual_errno != expected_errno) {
+ result(llen, FAIL);
+ return 1;
+ }
+
+ result(llen, OK);
+ return 0;
+}
+
/* declare tests based on line numbers. There must be exactly one test per line. */
#define CASE_TEST(name) \
case __LINE__: llen += printf("%d %s", test, #name);
@@ -942,6 +995,7 @@ int run_syscall(int min, int max)
int ret = 0;
void *p1, *p2;
int has_gettid = 1;
+ int has_brk;
/* <proc> indicates whether or not /proc is mounted */
proc = stat("/proc", &stat_buf) == 0;
@@ -954,6 +1008,9 @@ int run_syscall(int min, int max)
has_gettid = __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30);
#endif
+	/* on musl, brk()/sbrk() always fail */
+ has_brk = brk(0) == 0;
+
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
@@ -969,9 +1026,9 @@ int run_syscall(int min, int max)
CASE_TEST(kill_0); EXPECT_SYSZR(1, kill(getpid(), 0)); break;
CASE_TEST(kill_CONT); EXPECT_SYSZR(1, kill(getpid(), 0)); break;
CASE_TEST(kill_BADPID); EXPECT_SYSER(1, kill(INT_MAX, 0), -1, ESRCH); break;
- CASE_TEST(sbrk_0); EXPECT_PTRNE(1, sbrk(0), (void *)-1); break;
- CASE_TEST(sbrk); if ((p1 = p2 = sbrk(4096)) != (void *)-1) p2 = sbrk(-4096); EXPECT_SYSZR(1, (p2 == (void *)-1) || p2 == p1); break;
- CASE_TEST(brk); EXPECT_SYSZR(1, brk(sbrk(0))); break;
+ CASE_TEST(sbrk_0); EXPECT_PTRNE(has_brk, sbrk(0), (void *)-1); break;
+ CASE_TEST(sbrk); if ((p1 = p2 = sbrk(4096)) != (void *)-1) p2 = sbrk(-4096); EXPECT_SYSZR(has_brk, (p2 == (void *)-1) || p2 == p1); break;
+ CASE_TEST(brk); EXPECT_SYSZR(has_brk, brk(sbrk(0))); break;
CASE_TEST(chdir_root); EXPECT_SYSZR(1, chdir("/")); chdir(getenv("PWD")); break;
CASE_TEST(chdir_dot); EXPECT_SYSZR(1, chdir(".")); break;
CASE_TEST(chdir_blah); EXPECT_SYSER(1, chdir("/blah"), -1, ENOENT); break;
@@ -1076,19 +1133,17 @@ int run_stdlib(int min, int max)
CASE_TEST(strchr_foobar_z); EXPECT_STRZR(1, strchr("foobar", 'z')); break;
CASE_TEST(strrchr_foobar_o); EXPECT_STREQ(1, strrchr("foobar", 'o'), "obar"); break;
CASE_TEST(strrchr_foobar_z); EXPECT_STRZR(1, strrchr("foobar", 'z')); break;
-#ifdef NOLIBC
- CASE_TEST(strlcat_0); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 0), buf, 3, "test"); break;
- CASE_TEST(strlcat_1); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 1), buf, 4, "test"); break;
- CASE_TEST(strlcat_5); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 5), buf, 7, "test"); break;
- CASE_TEST(strlcat_6); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 6), buf, 7, "testb"); break;
- CASE_TEST(strlcat_7); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 7), buf, 7, "testba"); break;
- CASE_TEST(strlcat_8); EXPECT_STRBUFEQ(1, strlcat(buf, "bar", 8), buf, 7, "testbar"); break;
- CASE_TEST(strlcpy_0); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 0), buf, 3, "test"); break;
- CASE_TEST(strlcpy_1); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 1), buf, 3, ""); break;
- CASE_TEST(strlcpy_2); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 2), buf, 3, "b"); break;
- CASE_TEST(strlcpy_3); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 3), buf, 3, "ba"); break;
- CASE_TEST(strlcpy_4); EXPECT_STRBUFEQ(1, strlcpy(buf, "bar", 4), buf, 3, "bar"); break;
-#endif
+ CASE_TEST(strlcat_0); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 0), buf, 3, "test"); break;
+ CASE_TEST(strlcat_1); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 1), buf, 4, "test"); break;
+ CASE_TEST(strlcat_5); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 5), buf, 7, "test"); break;
+ CASE_TEST(strlcat_6); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 6), buf, 7, "testb"); break;
+ CASE_TEST(strlcat_7); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 7), buf, 7, "testba"); break;
+ CASE_TEST(strlcat_8); EXPECT_STRBUFEQ(is_nolibc, strlcat(buf, "bar", 8), buf, 7, "testbar"); break;
+ CASE_TEST(strlcpy_0); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 0), buf, 3, "test"); break;
+ CASE_TEST(strlcpy_1); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 1), buf, 3, ""); break;
+ CASE_TEST(strlcpy_2); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 2), buf, 3, "b"); break;
+ CASE_TEST(strlcpy_3); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 3), buf, 3, "ba"); break;
+ CASE_TEST(strlcpy_4); EXPECT_STRBUFEQ(is_nolibc, strlcpy(buf, "bar", 4), buf, 3, "bar"); break;
CASE_TEST(memcmp_20_20); EXPECT_EQ(1, memcmp("aaa\x20", "aaa\x20", 4), 0); break;
CASE_TEST(memcmp_20_60); EXPECT_LT(1, memcmp("aaa\x20", "aaa\x60", 4), 0); break;
CASE_TEST(memcmp_60_20); EXPECT_GT(1, memcmp("aaa\x60", "aaa\x20", 4), 0); break;
@@ -1139,6 +1194,26 @@ int run_stdlib(int min, int max)
CASE_TEST(limit_ptrdiff_min); EXPECT_EQ(1, PTRDIFF_MIN, sizeof(long) == 8 ? (ptrdiff_t) 0x8000000000000000LL : (ptrdiff_t) 0x80000000); break;
CASE_TEST(limit_ptrdiff_max); EXPECT_EQ(1, PTRDIFF_MAX, sizeof(long) == 8 ? (ptrdiff_t) 0x7fffffffffffffffLL : (ptrdiff_t) 0x7fffffff); break;
CASE_TEST(limit_size_max); EXPECT_EQ(1, SIZE_MAX, sizeof(long) == 8 ? (size_t) 0xffffffffffffffffULL : (size_t) 0xffffffffU); break;
+ CASE_TEST(strtol_simple); EXPECT_STRTOX(1, strtol, "35", 10, 35, -1, 0); break;
+ CASE_TEST(strtol_positive); EXPECT_STRTOX(1, strtol, "+35", 10, 35, -1, 0); break;
+ CASE_TEST(strtol_negative); EXPECT_STRTOX(1, strtol, "-35", 10, -35, -1, 0); break;
+ CASE_TEST(strtol_hex_auto); EXPECT_STRTOX(1, strtol, "0xFF", 0, 255, -1, 0); break;
+ CASE_TEST(strtol_base36); EXPECT_STRTOX(1, strtol, "12yZ", 36, 50507, -1, 0); break;
+ CASE_TEST(strtol_cutoff); EXPECT_STRTOX(1, strtol, "1234567890", 8, 342391, 7, 0); break;
+ CASE_TEST(strtol_octal_auto); EXPECT_STRTOX(1, strtol, "011", 0, 9, -1, 0); break;
+ CASE_TEST(strtol_hex_00); EXPECT_STRTOX(1, strtol, "0x00", 16, 0, -1, 0); break;
+ CASE_TEST(strtol_hex_FF); EXPECT_STRTOX(1, strtol, "FF", 16, 255, -1, 0); break;
+ CASE_TEST(strtol_hex_ff); EXPECT_STRTOX(1, strtol, "ff", 16, 255, -1, 0); break;
+ CASE_TEST(strtol_hex_prefix); EXPECT_STRTOX(1, strtol, "0xFF", 16, 255, -1, 0); break;
+ CASE_TEST(strtol_trailer); EXPECT_STRTOX(1, strtol, "35foo", 10, 35, 2, 0); break;
+ CASE_TEST(strtol_overflow); EXPECT_STRTOX(1, strtol, "0x8000000000000000", 16, LONG_MAX, -1, ERANGE); break;
+ CASE_TEST(strtol_underflow); EXPECT_STRTOX(1, strtol, "-0x8000000000000001", 16, LONG_MIN, -1, ERANGE); break;
+ CASE_TEST(strtoul_negative); EXPECT_STRTOX(1, strtoul, "-0x1", 16, ULONG_MAX, 4, 0); break;
+ CASE_TEST(strtoul_overflow); EXPECT_STRTOX(1, strtoul, "0x10000000000000000", 16, ULONG_MAX, -1, ERANGE); break;
+ CASE_TEST(strerror_success); EXPECT_STREQ(is_nolibc, strerror(0), "errno=0"); break;
+ CASE_TEST(strerror_EINVAL); EXPECT_STREQ(is_nolibc, strerror(EINVAL), "errno=22"); break;
+ CASE_TEST(strerror_int_max); EXPECT_STREQ(is_nolibc, strerror(INT_MAX), "errno=2147483647"); break;
+ CASE_TEST(strerror_int_min); EXPECT_STREQ(is_nolibc, strerror(INT_MIN), "errno=-2147483648"); break;
case __LINE__:
return ret; /* must be last */
diff --git a/tools/testing/selftests/nolibc/run-tests.sh b/tools/testing/selftests/nolibc/run-tests.sh
index c0a5a7cea9fa..0446e6326a40 100755
--- a/tools/testing/selftests/nolibc/run-tests.sh
+++ b/tools/testing/selftests/nolibc/run-tests.sh
@@ -15,9 +15,10 @@ download_location="${cache_dir}/crosstools/"
build_location="$(realpath "${cache_dir}"/nolibc-tests/)"
perform_download=0
test_mode=system
+CFLAGS_EXTRA="-Werror"
archs="i386 x86_64 arm64 arm mips32le mips32be ppc ppc64 ppc64le riscv s390 loongarch"
-TEMP=$(getopt -o 'j:d:c:b:a:m:ph' -n "$0" -- "$@")
+TEMP=$(getopt -o 'j:d:c:b:a:m:peh' -n "$0" -- "$@")
eval set -- "$TEMP"
unset TEMP
@@ -40,6 +41,7 @@ Options:
-a [ARCH] Host architecture of toolchains to use (default: ${hostarch})
-b [DIR] Build location (default: ${build_location})
-m [MODE] Test mode user/system (default: ${test_mode})
+ -e Disable -Werror
EOF
}
@@ -66,6 +68,9 @@ while true; do
'-m')
test_mode="$2"
shift 2; continue ;;
+ '-e')
+ CFLAGS_EXTRA=""
+ shift; continue ;;
'-h')
print_usage
exit 0
@@ -153,7 +158,7 @@ test_arch() {
exit 1
esac
printf '%-15s' "$arch:"
- swallow_output "${MAKE[@]}" "$test_target" V=1
+ swallow_output "${MAKE[@]}" CFLAGS_EXTRA="$CFLAGS_EXTRA" "$test_target" V=1
cp run.out run.out."${arch}"
"${MAKE[@]}" report | grep passed
}
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index a156ac5dd2c6..973968f45bba 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -2,6 +2,7 @@
/fd-001-lookup
/fd-002-posix-eq
/fd-003-kthread
+/proc-2-is-kthread
/proc-fsconfig-hidepid
/proc-loadavg-001
/proc-multiple-procfs
@@ -9,6 +10,7 @@
/proc-pid-vm
/proc-self-map-files-001
/proc-self-map-files-002
+/proc-self-isnt-kthread
/proc-self-syscall
/proc-self-wchan
/proc-subset-pid
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index cd95369254c0..b12921b9794b 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -1,17 +1,19 @@
# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -Wall -O2 -Wno-unused-function
-CFLAGS += -D_GNU_SOURCE
+CFLAGS += $(TOOLS_INCLUDES)
LDFLAGS += -pthread
TEST_GEN_PROGS :=
TEST_GEN_PROGS += fd-001-lookup
TEST_GEN_PROGS += fd-002-posix-eq
TEST_GEN_PROGS += fd-003-kthread
+TEST_GEN_PROGS += proc-2-is-kthread
TEST_GEN_PROGS += proc-loadavg-001
TEST_GEN_PROGS += proc-empty-vm
TEST_GEN_PROGS += proc-pid-vm
TEST_GEN_PROGS += proc-self-map-files-001
TEST_GEN_PROGS += proc-self-map-files-002
+TEST_GEN_PROGS += proc-self-isnt-kthread
TEST_GEN_PROGS += proc-self-syscall
TEST_GEN_PROGS += proc-self-wchan
TEST_GEN_PROGS += proc-subset-pid
diff --git a/tools/testing/selftests/proc/proc-2-is-kthread.c b/tools/testing/selftests/proc/proc-2-is-kthread.c
new file mode 100644
index 000000000000..f13668fb482e
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-2-is-kthread.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2024 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Test that kernel thread is reported as such. */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+int main(void)
+{
+ /*
+ * The following solutions don't really work:
+ *
+	 * 1) jit kernel module which creates a kernel thread:
+ * test becomes arch-specific,
+ * problems with mandatory module signing,
+ * problems with lockdown mode,
+ * doesn't work with CONFIG_MODULES=n at all,
+ * kthread creation API is formally unstable internal kernel API,
+ * need a mechanism to report test kernel thread's PID back,
+ *
+ * 2) ksoftirqd/0 and kswapd0 look like stable enough kernel threads,
+ * but their PIDs are unstable.
+ *
+	 * Check against kthreadd, which always seems to exist under pid 2.
+ */
+ int fd = open("/proc/2/status", O_RDONLY);
+ assert(fd >= 0);
+
+ char buf[4096];
+ ssize_t rv = read(fd, buf, sizeof(buf));
+ assert(0 <= rv && rv < sizeof(buf));
+ buf[rv] = '\0';
+
+ assert(strstr(buf, "Kthread:\t1\n"));
+
+ return 0;
+}
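The comment above explains why the test keys off kthreadd: it is a kernel thread with a stable PID (2). The check amounts to reading the new Kthread field from /proc/2/status, which can also be done by hand on a kernel that exposes the field (output illustrative):

    $ cat /proc/2/comm
    kthreadd
    $ grep '^Kthread' /proc/2/status
    Kthread:	1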
diff --git a/tools/testing/selftests/proc/proc-empty-vm.c b/tools/testing/selftests/proc/proc-empty-vm.c
index 56198d4ca2bf..b3f898aab4ab 100644
--- a/tools/testing/selftests/proc/proc-empty-vm.c
+++ b/tools/testing/selftests/proc/proc-empty-vm.c
@@ -381,9 +381,6 @@ static int test_proc_pid_statm(pid_t pid)
assert(rv >= 0);
assert(rv <= sizeof(buf));
- if (0) {
- write(1, buf, rv);
- }
const char *p = buf;
const char *const end = p + rv;
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
index cacbd2a4aec9..d04685771952 100644
--- a/tools/testing/selftests/proc/proc-pid-vm.c
+++ b/tools/testing/selftests/proc/proc-pid-vm.c
@@ -45,6 +45,7 @@
#include <linux/kdev_t.h>
#include <sys/time.h>
#include <sys/resource.h>
+#include <linux/fs.h>
#include "../kselftest.h"
@@ -492,6 +493,91 @@ int main(void)
assert(buf[13] == '\n');
}
+ /* Test PROCMAP_QUERY ioctl() for /proc/$PID/maps */
+ {
+ char path_buf[256], exp_path_buf[256];
+ struct procmap_query q;
+ int fd, err;
+
+ snprintf(path_buf, sizeof(path_buf), "/proc/%u/maps", pid);
+ fd = open(path_buf, O_RDONLY);
+ if (fd == -1)
+ return 1;
+
+ /* CASE 1: exact MATCH at VADDR */
+ memset(&q, 0, sizeof(q));
+ q.size = sizeof(q);
+ q.query_addr = VADDR;
+ q.query_flags = 0;
+ q.vma_name_addr = (__u64)(unsigned long)path_buf;
+ q.vma_name_size = sizeof(path_buf);
+
+ err = ioctl(fd, PROCMAP_QUERY, &q);
+ assert(err == 0);
+
+ assert(q.query_addr == VADDR);
+ assert(q.query_flags == 0);
+
+ assert(q.vma_flags == (PROCMAP_QUERY_VMA_READABLE | PROCMAP_QUERY_VMA_EXECUTABLE));
+ assert(q.vma_start == VADDR);
+ assert(q.vma_end == VADDR + PAGE_SIZE);
+ assert(q.vma_page_size == PAGE_SIZE);
+
+ assert(q.vma_offset == 0);
+ assert(q.inode == st.st_ino);
+ assert(q.dev_major == MAJOR(st.st_dev));
+ assert(q.dev_minor == MINOR(st.st_dev));
+
+ snprintf(exp_path_buf, sizeof(exp_path_buf),
+ "/tmp/#%llu (deleted)", (unsigned long long)st.st_ino);
+ assert(q.vma_name_size == strlen(exp_path_buf) + 1);
+ assert(strcmp(path_buf, exp_path_buf) == 0);
+
+ /* CASE 2: NO MATCH at VADDR-1 */
+ memset(&q, 0, sizeof(q));
+ q.size = sizeof(q);
+ q.query_addr = VADDR - 1;
+ q.query_flags = 0; /* exact match */
+
+ err = ioctl(fd, PROCMAP_QUERY, &q);
+ err = err < 0 ? -errno : 0;
+ assert(err == -ENOENT);
+
+ /* CASE 3: MATCH COVERING_OR_NEXT_VMA at VADDR - 1 */
+ memset(&q, 0, sizeof(q));
+ q.size = sizeof(q);
+ q.query_addr = VADDR - 1;
+ q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;
+
+ err = ioctl(fd, PROCMAP_QUERY, &q);
+ assert(err == 0);
+
+ assert(q.query_addr == VADDR - 1);
+ assert(q.query_flags == PROCMAP_QUERY_COVERING_OR_NEXT_VMA);
+ assert(q.vma_start == VADDR);
+ assert(q.vma_end == VADDR + PAGE_SIZE);
+
+ /* CASE 4: NO MATCH at VADDR + PAGE_SIZE */
+ memset(&q, 0, sizeof(q));
+ q.size = sizeof(q);
+ q.query_addr = VADDR + PAGE_SIZE; /* point right after the VMA */
+ q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;
+
+ err = ioctl(fd, PROCMAP_QUERY, &q);
+ err = err < 0 ? -errno : 0;
+ assert(err == -ENOENT);
+
+ /* CASE 5: NO MATCH WRITABLE at VADDR */
+ memset(&q, 0, sizeof(q));
+ q.size = sizeof(q);
+ q.query_addr = VADDR;
+ q.query_flags = PROCMAP_QUERY_VMA_WRITABLE;
+
+ err = ioctl(fd, PROCMAP_QUERY, &q);
+ err = err < 0 ? -errno : 0;
+ assert(err == -ENOENT);
+ }
+
return 0;
}
#else
diff --git a/tools/testing/selftests/proc/proc-self-isnt-kthread.c b/tools/testing/selftests/proc/proc-self-isnt-kthread.c
new file mode 100644
index 000000000000..e01f4e0a91b4
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-self-isnt-kthread.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2024 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Test that userspace program is not kernel thread. */
+#undef NDEBUG
+#include <assert.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+int main(void)
+{
+ int fd = open("/proc/self/status", O_RDONLY);
+ assert(fd >= 0);
+
+ char buf[4096];
+ ssize_t rv = read(fd, buf, sizeof(buf));
+ assert(0 <= rv && rv < sizeof(buf));
+ buf[rv] = '\0';
+
+	/* This test is very much not a kernel thread. */
+ assert(strstr(buf, "Kthread:\t0\n"));
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
index 021863f86053..f408bd6bfc3d 100644
--- a/tools/testing/selftests/resctrl/Makefile
+++ b/tools/testing/selftests/resctrl/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
+CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
CFLAGS += $(KHDR_INCLUDES)
TEST_GEN_PROGS := resctrl_tests
diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
index 1b339d6bbff1..1ff1104e6575 100644
--- a/tools/testing/selftests/resctrl/cache.c
+++ b/tools/testing/selftests/resctrl/cache.c
@@ -101,12 +101,12 @@ static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
*
* Return: 0 on success, < 0 on error.
*/
-static int print_results_cache(const char *filename, int bm_pid, __u64 llc_value)
+static int print_results_cache(const char *filename, pid_t bm_pid, __u64 llc_value)
{
FILE *fp;
if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
- printf("Pid: %d \t LLC_value: %llu\n", bm_pid, llc_value);
+ printf("Pid: %d \t LLC_value: %llu\n", (int)bm_pid, llc_value);
} else {
fp = fopen(filename, "a");
if (!fp) {
@@ -114,7 +114,7 @@ static int print_results_cache(const char *filename, int bm_pid, __u64 llc_value
return -1;
}
- fprintf(fp, "Pid: %d \t llc_value: %llu\n", bm_pid, llc_value);
+ fprintf(fp, "Pid: %d \t llc_value: %llu\n", (int)bm_pid, llc_value);
fclose(fp);
}
@@ -133,7 +133,7 @@ static int print_results_cache(const char *filename, int bm_pid, __u64 llc_value
* Return: =0 on success. <0 on failure.
*/
int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
- const char *filename, int bm_pid)
+ const char *filename, pid_t bm_pid)
{
int ret;
@@ -161,7 +161,7 @@ int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
*
* Return: =0 on success. <0 on failure.
*/
-int measure_llc_resctrl(const char *filename, int bm_pid)
+int measure_llc_resctrl(const char *filename, pid_t bm_pid)
{
unsigned long llc_occu_resc = 0;
int ret;
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index 55315ed695f4..742782438ca3 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -158,7 +158,6 @@ static int cat_test(const struct resctrl_test *test,
struct resctrl_val_param *param,
size_t span, unsigned long current_mask)
{
- char *resctrl_val = param->resctrl_val;
struct perf_event_read pe_read;
struct perf_event_attr pea;
cpu_set_t old_affinity;
@@ -178,8 +177,7 @@ static int cat_test(const struct resctrl_test *test,
return ret;
/* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/
- ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
- resctrl_val);
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp);
if (ret)
goto reset_affinity;
@@ -272,7 +270,6 @@ static int cat_run_test(const struct resctrl_test *test, const struct user_param
start_mask = create_bit_mask(start, n);
struct resctrl_val_param param = {
- .resctrl_val = CAT_STR,
.ctrlgrp = "c1",
.filename = RESULT_FILE_NAME,
.num_of_runs = 0,
diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
index 0105afec6188..0c045080d808 100644
--- a/tools/testing/selftests/resctrl/cmt_test.c
+++ b/tools/testing/selftests/resctrl/cmt_test.c
@@ -16,6 +16,17 @@
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15
+#define CON_MON_LCC_OCCUP_PATH \
+ "%s/%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+static int cmt_init(const struct resctrl_val_param *param, int domain_id)
+{
+ sprintf(llc_occup_path, CON_MON_LCC_OCCUP_PATH, RESCTRL_PATH,
+ param->ctrlgrp, domain_id);
+
+ return 0;
+}
+
static int cmt_setup(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *p)
@@ -29,6 +40,13 @@ static int cmt_setup(const struct resctrl_test *test,
return 0;
}
+static int cmt_measure(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid)
+{
+ sleep(1);
+ return measure_llc_resctrl(param->filename, bm_pid);
+}
+
static int show_results_info(unsigned long sum_llc_val, int no_of_bits,
unsigned long cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
@@ -126,13 +144,13 @@ static int cmt_run_test(const struct resctrl_test *test, const struct user_param
}
struct resctrl_val_param param = {
- .resctrl_val = CMT_STR,
.ctrlgrp = "c1",
- .mongrp = "m1",
.filename = RESULT_FILE_NAME,
.mask = ~(long_mask << n) & long_mask,
.num_of_runs = 0,
+ .init = cmt_init,
.setup = cmt_setup,
+ .measure = cmt_measure,
};
span = cache_portion_size(cache_total_size, param.mask, long_mask);
diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
index a6ad39aae162..ab8496a4925b 100644
--- a/tools/testing/selftests/resctrl/mba_test.c
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -17,6 +17,19 @@
#define ALLOCATION_MIN 10
#define ALLOCATION_STEP 10
+static int mba_init(const struct resctrl_val_param *param, int domain_id)
+{
+ int ret;
+
+ ret = initialize_mem_bw_imc();
+ if (ret)
+ return ret;
+
+ initialize_mem_bw_resctrl(param, domain_id);
+
+ return 0;
+}
+
/*
* Change schemata percentage from 100 to 10%. Write schemata to specified
* con_mon grp, mon_grp in resctrl FS.
@@ -51,6 +64,12 @@ static int mba_setup(const struct resctrl_test *test,
return 0;
}
+static int mba_measure(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid)
+{
+ return measure_mem_bw(uparams, param, bm_pid, "reads");
+}
+
static bool show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
{
int allocation, runs;
@@ -145,12 +164,11 @@ static void mba_test_cleanup(void)
static int mba_run_test(const struct resctrl_test *test, const struct user_params *uparams)
{
struct resctrl_val_param param = {
- .resctrl_val = MBA_STR,
.ctrlgrp = "c1",
- .mongrp = "m1",
.filename = RESULT_FILE_NAME,
- .bw_report = "reads",
- .setup = mba_setup
+ .init = mba_init,
+ .setup = mba_setup,
+ .measure = mba_measure,
};
int ret;
diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
index 6fec51e1ff46..6b5a3b52d861 100644
--- a/tools/testing/selftests/resctrl/mbm_test.c
+++ b/tools/testing/selftests/resctrl/mbm_test.c
@@ -86,6 +86,19 @@ static int check_results(size_t span)
return ret;
}
+static int mbm_init(const struct resctrl_val_param *param, int domain_id)
+{
+ int ret;
+
+ ret = initialize_mem_bw_imc();
+ if (ret)
+ return ret;
+
+ initialize_mem_bw_resctrl(param, domain_id);
+
+ return 0;
+}
+
static int mbm_setup(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *p)
@@ -105,6 +118,12 @@ static int mbm_setup(const struct resctrl_test *test,
return ret;
}
+static int mbm_measure(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid)
+{
+ return measure_mem_bw(uparams, param, bm_pid, "reads");
+}
+
static void mbm_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
@@ -113,12 +132,11 @@ static void mbm_test_cleanup(void)
static int mbm_run_test(const struct resctrl_test *test, const struct user_params *uparams)
{
struct resctrl_val_param param = {
- .resctrl_val = MBM_STR,
.ctrlgrp = "c1",
- .mongrp = "m1",
.filename = RESULT_FILE_NAME,
- .bw_report = "reads",
- .setup = mbm_setup
+ .init = mbm_init,
+ .setup = mbm_setup,
+ .measure = mbm_measure,
};
int ret;
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index 00d51fa7531c..2dda56084588 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -43,13 +43,6 @@
#define DEFAULT_SPAN (250 * MB)
-#define PARENT_EXIT() \
- do { \
- kill(ppid, SIGKILL); \
- umount_resctrlfs(); \
- exit(EXIT_FAILURE); \
- } while (0)
-
/*
* user_params: User supplied parameters
* @cpu: CPU number to which the benchmark will be bound to
@@ -88,24 +81,27 @@ struct resctrl_test {
/*
* resctrl_val_param: resctrl test parameters
- * @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
* @filename: Name of file to which the o/p should be written
- * @bw_report: Bandwidth report type (reads vs writes)
- * @setup: Call back function to setup test environment
+ * @init: Callback function to initialize test environment
+ * @setup: Callback function to setup per test run environment
+ * @measure: Callback that performs the measurement (a single test)
*/
struct resctrl_val_param {
- char *resctrl_val;
- char ctrlgrp[64];
- char mongrp[64];
+ const char *ctrlgrp;
+ const char *mongrp;
char filename[64];
- char *bw_report;
unsigned long mask;
int num_of_runs;
+ int (*init)(const struct resctrl_val_param *param,
+ int domain_id);
int (*setup)(const struct resctrl_test *test,
const struct user_params *uparams,
struct resctrl_val_param *param);
+ int (*measure)(const struct user_params *uparams,
+ struct resctrl_val_param *param,
+ pid_t bm_pid);
};
struct perf_event_read {
@@ -115,11 +111,6 @@ struct perf_event_read {
} values[2];
};
-#define MBM_STR "mbm"
-#define MBA_STR "mba"
-#define CMT_STR "cmt"
-#define CAT_STR "cat"
-
/*
* Memory location that consumes values compiler must not optimize away.
* Volatile ensures writes to this location cannot be optimized away by
@@ -127,8 +118,6 @@ struct perf_event_read {
*/
extern volatile int *value_sink;
-extern pid_t bm_pid, ppid;
-
extern char llc_occup_path[1024];
int get_vendor(void);
@@ -137,7 +126,7 @@ int filter_dmesg(void);
int get_domain_id(const char *resource, int cpu_no, int *domain_id);
int mount_resctrlfs(void);
int umount_resctrlfs(void);
-int validate_bw_report_request(char *bw_report);
+const char *get_bw_report_type(const char *bw_report);
bool resctrl_resource_exists(const char *resource);
bool resctrl_mon_feature_exists(const char *resource, const char *feature);
bool resource_info_file_exists(const char *resource, const char *file);
@@ -145,15 +134,21 @@ bool test_resource_feature_check(const struct resctrl_test *test);
char *fgrep(FILE *inf, const char *str);
int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity);
int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity);
-int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource);
-int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
- char *resctrl_val);
+int write_schemata(const char *ctrlgrp, char *schemata, int cpu_no,
+ const char *resource);
+int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp);
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
int group_fd, unsigned long flags);
unsigned char *alloc_buffer(size_t buf_size, int memflush);
void mem_flush(unsigned char *buf, size_t buf_size);
void fill_cache_read(unsigned char *buf, size_t buf_size, bool once);
int run_fill_buf(size_t buf_size, int memflush, int op, bool once);
+int initialize_mem_bw_imc(void);
+int measure_mem_bw(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid,
+ const char *bw_report);
+void initialize_mem_bw_resctrl(const struct resctrl_val_param *param,
+ int domain_id);
int resctrl_val(const struct resctrl_test *test,
const struct user_params *uparams,
const char * const *benchmark_cmd,
@@ -174,8 +169,8 @@ void perf_event_initialize_read_format(struct perf_event_read *pe_read);
int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no);
int perf_event_reset_enable(int pe_fd);
int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
- const char *filename, int bm_pid);
-int measure_llc_resctrl(const char *filename, int bm_pid);
+ const char *filename, pid_t bm_pid);
+int measure_llc_resctrl(const char *filename, pid_t bm_pid);
void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines);
/*
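The per-feature string dispatch (MBM_STR/MBA_STR/CMT_STR) is replaced by the init/setup/measure callbacks above. A hedged sketch of how a test can wire them up, using only the prototypes declared in this header; the mbm_* helper names and the "reads" report type below are illustrative stand-ins, not the patch's own test code:

static int mbm_init(const struct resctrl_val_param *param, int domain_id)
{
	/* Resolve the resctrl counter path for this domain, then the iMC side. */
	initialize_mem_bw_resctrl(param, domain_id);
	return initialize_mem_bw_imc();
}

static int mbm_setup(const struct resctrl_test *test,
		     const struct user_params *uparams,
		     struct resctrl_val_param *param)
{
	/* Real tests adjust schemata here and signal when enough runs are done. */
	return 0;
}

static int mbm_measure(const struct user_params *uparams,
		       struct resctrl_val_param *param, pid_t bm_pid)
{
	/* One 1-second measurement of the benchmark identified by bm_pid. */
	return measure_mem_bw(uparams, param, bm_pid, "reads");
}

static struct resctrl_val_param mbm_param = {
	.ctrlgrp	= "c1",
	.mongrp		= "m1",
	.init		= mbm_init,
	.setup		= mbm_setup,
	.measure	= mbm_measure,
};
/* ...and later: ret = resctrl_val(test, uparams, benchmark_cmd, &mbm_param); */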
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index 445f306d4c2f..8c275f6b4dd7 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -19,30 +19,10 @@
#define MAX_TOKENS 5
#define READ 0
#define WRITE 1
-#define CON_MON_MBM_LOCAL_BYTES_PATH \
- "%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
#define CON_MBM_LOCAL_BYTES_PATH \
"%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
-#define MON_MBM_LOCAL_BYTES_PATH \
- "%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
-
-#define MBM_LOCAL_BYTES_PATH \
- "%s/mon_data/mon_L3_%02d/mbm_local_bytes"
-
-#define CON_MON_LCC_OCCUP_PATH \
- "%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
-
-#define CON_LCC_OCCUP_PATH \
- "%s/%s/mon_data/mon_L3_%02d/llc_occupancy"
-
-#define MON_LCC_OCCUP_PATH \
- "%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
-
-#define LCC_OCCUP_PATH \
- "%s/mon_data/mon_L3_%02d/llc_occupancy"
-
struct membw_read_format {
__u64 value; /* The value of the event */
__u64 time_enabled; /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
@@ -276,7 +256,7 @@ static int num_of_imcs(void)
return count;
}
-static int initialize_mem_bw_imc(void)
+int initialize_mem_bw_imc(void)
{
int imc, j;
@@ -293,44 +273,93 @@ static int initialize_mem_bw_imc(void)
return 0;
}
+static void perf_close_imc_mem_bw(void)
+{
+ int mc;
+
+ for (mc = 0; mc < imcs; mc++) {
+ if (imc_counters_config[mc][READ].fd != -1)
+ close(imc_counters_config[mc][READ].fd);
+ if (imc_counters_config[mc][WRITE].fd != -1)
+ close(imc_counters_config[mc][WRITE].fd);
+ }
+}
+
/*
- * get_mem_bw_imc: Memory band width as reported by iMC counters
- * @cpu_no: CPU number that the benchmark PID is binded to
- * @bw_report: Bandwidth report type (reads, writes)
- *
- * Memory B/W utilized by a process on a socket can be calculated using
- * iMC counters. Perf events are used to read these counters.
+ * perf_open_imc_mem_bw - Open perf fds for IMCs
+ * @cpu_no: CPU number that the benchmark PID is bound to
*
* Return: = 0 on success. < 0 on failure.
*/
-static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+static int perf_open_imc_mem_bw(int cpu_no)
{
- float reads, writes, of_mul_read, of_mul_write;
- int imc, j, ret;
+ int imc, ret;
- /* Start all iMC counters to log values (both read and write) */
- reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
for (imc = 0; imc < imcs; imc++) {
- for (j = 0; j < 2; j++) {
- ret = open_perf_event(imc, cpu_no, j);
- if (ret)
- return -1;
- }
- for (j = 0; j < 2; j++)
- membw_ioctl_perf_event_ioc_reset_enable(imc, j);
+ imc_counters_config[imc][READ].fd = -1;
+ imc_counters_config[imc][WRITE].fd = -1;
+ }
+
+ for (imc = 0; imc < imcs; imc++) {
+ ret = open_perf_event(imc, cpu_no, READ);
+ if (ret)
+ goto close_fds;
+ ret = open_perf_event(imc, cpu_no, WRITE);
+ if (ret)
+ goto close_fds;
+ }
+
+ return 0;
+
+close_fds:
+ perf_close_imc_mem_bw();
+ return -1;
+}
+
+/*
+ * do_imc_mem_bw_test - Perform memory bandwidth test
+ *
+ * Runs the memory bandwidth test over a one second period. Also handles starting
+ * and stopping of the IMC perf counters around the test.
+ */
+static void do_imc_mem_bw_test(void)
+{
+ int imc;
+
+ for (imc = 0; imc < imcs; imc++) {
+ membw_ioctl_perf_event_ioc_reset_enable(imc, READ);
+ membw_ioctl_perf_event_ioc_reset_enable(imc, WRITE);
}
sleep(1);
/* Stop counters after a second to get results (both read and write) */
for (imc = 0; imc < imcs; imc++) {
- for (j = 0; j < 2; j++)
- membw_ioctl_perf_event_ioc_disable(imc, j);
+ membw_ioctl_perf_event_ioc_disable(imc, READ);
+ membw_ioctl_perf_event_ioc_disable(imc, WRITE);
}
+}
+
+/*
+ * get_mem_bw_imc - Memory bandwidth as reported by iMC counters
+ * @bw_report: Bandwidth report type (reads, writes)
+ *
+ * Memory bandwidth utilized by a process on a socket can be calculated
+ * using iMC counters. Perf events are used to read these counters.
+ *
+ * Return: = 0 on success. < 0 on failure.
+ */
+static int get_mem_bw_imc(const char *bw_report, float *bw_imc)
+{
+ float reads, writes, of_mul_read, of_mul_write;
+ int imc;
+
+ /* Start all iMC counters to log values (both read and write) */
+ reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
/*
* Get results which are stored in struct type imc_counter_config
- * Take over flow into consideration before calculating total b/w
+ * Take overflow into consideration before calculating total bandwidth.
*/
for (imc = 0; imc < imcs; imc++) {
struct imc_counter_config *r =
@@ -340,15 +369,13 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
if (read(r->fd, &r->return_value,
sizeof(struct membw_read_format)) == -1) {
- ksft_perror("Couldn't get read b/w through iMC");
-
+ ksft_perror("Couldn't get read bandwidth through iMC");
return -1;
}
if (read(w->fd, &w->return_value,
sizeof(struct membw_read_format)) == -1) {
- ksft_perror("Couldn't get write bw through iMC");
-
+ ksft_perror("Couldn't get write bandwidth through iMC");
return -1;
}
@@ -369,11 +396,6 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
writes += w->return_value.value * of_mul_write * SCALE;
}
- for (imc = 0; imc < imcs; imc++) {
- close(imc_counters_config[imc][READ].fd);
- close(imc_counters_config[imc][WRITE].fd);
- }
-
if (strcmp(bw_report, "reads") == 0) {
*bw_imc = reads;
return 0;
@@ -388,84 +410,45 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
return 0;
}
-void set_mbm_path(const char *ctrlgrp, const char *mongrp, int domain_id)
+/*
+ * initialize_mem_bw_resctrl: Appropriately populate "mbm_total_path"
+ * @param: Parameters passed to resctrl_val()
+ * @domain_id: Domain ID (cache ID; for MB, L3 cache ID)
+ */
+void initialize_mem_bw_resctrl(const struct resctrl_val_param *param,
+ int domain_id)
{
- if (ctrlgrp && mongrp)
- sprintf(mbm_total_path, CON_MON_MBM_LOCAL_BYTES_PATH,
- RESCTRL_PATH, ctrlgrp, mongrp, domain_id);
- else if (!ctrlgrp && mongrp)
- sprintf(mbm_total_path, MON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
- mongrp, domain_id);
- else if (ctrlgrp && !mongrp)
- sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
- ctrlgrp, domain_id);
- else if (!ctrlgrp && !mongrp)
- sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
- domain_id);
+ sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
+ param->ctrlgrp, domain_id);
}
/*
- * initialize_mem_bw_resctrl: Appropriately populate "mbm_total_path"
- * @ctrlgrp: Name of the control monitor group (con_mon grp)
- * @mongrp: Name of the monitor group (mon grp)
- * @cpu_no: CPU number that the benchmark PID is binded to
- * @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
+ * Open file to read MBM local bytes from resctrl FS
*/
-static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
- int cpu_no, char *resctrl_val)
+static FILE *open_mem_bw_resctrl(const char *mbm_bw_file)
{
- int domain_id;
-
- if (get_domain_id("MB", cpu_no, &domain_id) < 0) {
- ksft_print_msg("Could not get domain ID\n");
- return;
- }
+ FILE *fp;
- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
- set_mbm_path(ctrlgrp, mongrp, domain_id);
+ fp = fopen(mbm_bw_file, "r");
+ if (!fp)
+ ksft_perror("Failed to open total memory bandwidth file");
- if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
- if (ctrlgrp)
- sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
- RESCTRL_PATH, ctrlgrp, domain_id);
- else
- sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH,
- RESCTRL_PATH, domain_id);
- }
+ return fp;
}
/*
* Get MBM Local bytes as reported by resctrl FS
- * For MBM,
- * 1. If con_mon grp and mon grp are given, then read from con_mon grp's mon grp
- * 2. If only con_mon grp is given, then read from con_mon grp
- * 3. If both are not given, then read from root con_mon grp
- * For MBA,
- * 1. If con_mon grp is given, then read from it
- * 2. If con_mon grp is not given, then read from root con_mon grp
*/
-static int get_mem_bw_resctrl(unsigned long *mbm_total)
+static int get_mem_bw_resctrl(FILE *fp, unsigned long *mbm_total)
{
- FILE *fp;
-
- fp = fopen(mbm_total_path, "r");
- if (!fp) {
- ksft_perror("Failed to open total bw file");
-
+ if (fscanf(fp, "%lu\n", mbm_total) <= 0) {
+ ksft_perror("Could not get MBM local bytes");
return -1;
}
- if (fscanf(fp, "%lu", mbm_total) <= 0) {
- ksft_perror("Could not get mbm local bytes");
- fclose(fp);
-
- return -1;
- }
- fclose(fp);
-
return 0;
}
-pid_t bm_pid, ppid;
+static pid_t bm_pid, ppid;
void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
{
@@ -523,6 +506,13 @@ void signal_handler_unregister(void)
}
}
+static void parent_exit(pid_t ppid)
+{
+ kill(ppid, SIGKILL);
+ umount_resctrlfs();
+ exit(EXIT_FAILURE);
+}
+
/*
* print_results_bw: the memory bandwidth results are stored in a file
* @filename: file that stores the results
@@ -532,14 +522,14 @@ void signal_handler_unregister(void)
*
* Return: 0 on success, < 0 on error.
*/
-static int print_results_bw(char *filename, int bm_pid, float bw_imc,
+static int print_results_bw(char *filename, pid_t bm_pid, float bw_imc,
unsigned long bw_resc)
{
unsigned long diff = fabs(bw_imc - bw_resc);
FILE *fp;
if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
- printf("Pid: %d \t Mem_BW_iMC: %f \t ", bm_pid, bw_imc);
+ printf("Pid: %d \t Mem_BW_iMC: %f \t ", (int)bm_pid, bw_imc);
printf("Mem_BW_resc: %lu \t Difference: %lu\n", bw_resc, diff);
} else {
fp = fopen(filename, "a");
@@ -549,7 +539,7 @@ static int print_results_bw(char *filename, int bm_pid, float bw_imc,
return -1;
}
if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
- bm_pid, bw_imc, bw_resc, diff) <= 0) {
+ (int)bm_pid, bw_imc, bw_resc, diff) <= 0) {
ksft_print_msg("Could not log results\n");
fclose(fp);
@@ -561,73 +551,67 @@ static int print_results_bw(char *filename, int bm_pid, float bw_imc,
return 0;
}
-static void set_cmt_path(const char *ctrlgrp, const char *mongrp, char sock_num)
-{
- if (strlen(ctrlgrp) && strlen(mongrp))
- sprintf(llc_occup_path, CON_MON_LCC_OCCUP_PATH, RESCTRL_PATH,
- ctrlgrp, mongrp, sock_num);
- else if (!strlen(ctrlgrp) && strlen(mongrp))
- sprintf(llc_occup_path, MON_LCC_OCCUP_PATH, RESCTRL_PATH,
- mongrp, sock_num);
- else if (strlen(ctrlgrp) && !strlen(mongrp))
- sprintf(llc_occup_path, CON_LCC_OCCUP_PATH, RESCTRL_PATH,
- ctrlgrp, sock_num);
- else if (!strlen(ctrlgrp) && !strlen(mongrp))
- sprintf(llc_occup_path, LCC_OCCUP_PATH, RESCTRL_PATH, sock_num);
-}
-
/*
- * initialize_llc_occu_resctrl: Appropriately populate "llc_occup_path"
- * @ctrlgrp: Name of the control monitor group (con_mon grp)
- * @mongrp: Name of the monitor group (mon grp)
- * @cpu_no: CPU number that the benchmark PID is binded to
- * @resctrl_val: Resctrl feature (Eg: cat, cmt.. etc)
+ * measure_mem_bw - Measures memory bandwidth numbers while benchmark runs
+ * @uparams: User supplied parameters
+ * @param: Parameters passed to resctrl_val()
+ * @bm_pid: PID that runs the benchmark
+ * @bw_report: Bandwidth report type (reads, writes)
+ *
+ * Measure memory bandwidth from resctrl and from another source, which is
+ * the perf iMC value (or could be something else if the perf iMC event is
+ * not available). Compare the two values to validate the resctrl value. It
+ * takes 1 sec to measure the data.
*/
-static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
- int cpu_no, char *resctrl_val)
+int measure_mem_bw(const struct user_params *uparams,
+ struct resctrl_val_param *param, pid_t bm_pid,
+ const char *bw_report)
{
- int domain_id;
+ unsigned long bw_resc, bw_resc_start, bw_resc_end;
+ FILE *mem_bw_fp;
+ float bw_imc;
+ int ret;
- if (get_domain_id("L3", cpu_no, &domain_id) < 0) {
- ksft_print_msg("Could not get domain ID\n");
- return;
- }
+ bw_report = get_bw_report_type(bw_report);
+ if (!bw_report)
+ return -1;
- if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
- set_cmt_path(ctrlgrp, mongrp, domain_id);
-}
+ mem_bw_fp = open_mem_bw_resctrl(mbm_total_path);
+ if (!mem_bw_fp)
+ return -1;
-static int measure_vals(const struct user_params *uparams,
- struct resctrl_val_param *param,
- unsigned long *bw_resc_start)
-{
- unsigned long bw_resc, bw_resc_end;
- float bw_imc;
- int ret;
+ ret = perf_open_imc_mem_bw(uparams->cpu);
+ if (ret < 0)
+ goto close_fp;
- /*
- * Measure memory bandwidth from resctrl and from
- * another source which is perf imc value or could
- * be something else if perf imc event is not available.
- * Compare the two values to validate resctrl value.
- * It takes 1sec to measure the data.
- */
- ret = get_mem_bw_imc(uparams->cpu, param->bw_report, &bw_imc);
+ ret = get_mem_bw_resctrl(mem_bw_fp, &bw_resc_start);
if (ret < 0)
- return ret;
+ goto close_imc;
+
+ rewind(mem_bw_fp);
+
+ do_imc_mem_bw_test();
- ret = get_mem_bw_resctrl(&bw_resc_end);
+ ret = get_mem_bw_resctrl(mem_bw_fp, &bw_resc_end);
if (ret < 0)
- return ret;
+ goto close_imc;
- bw_resc = (bw_resc_end - *bw_resc_start) / MB;
- ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
- if (ret)
- return ret;
+ ret = get_mem_bw_imc(bw_report, &bw_imc);
+ if (ret < 0)
+ goto close_imc;
- *bw_resc_start = bw_resc_end;
+ perf_close_imc_mem_bw();
+ fclose(mem_bw_fp);
- return 0;
+ bw_resc = (bw_resc_end - bw_resc_start) / MB;
+
+ return print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
+
+close_imc:
+ perf_close_imc_mem_bw();
+close_fp:
+ fclose(mem_bw_fp);
+ return ret;
}
/*
@@ -654,7 +638,7 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
fp = freopen("/dev/null", "w", stdout);
if (!fp) {
ksft_perror("Unable to direct benchmark status to /dev/null");
- PARENT_EXIT();
+ parent_exit(ppid);
}
if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
@@ -668,7 +652,7 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
once = false;
} else {
ksft_print_msg("Invalid once parameter\n");
- PARENT_EXIT();
+ parent_exit(ppid);
}
if (run_fill_buf(span, memflush, operation, once))
@@ -682,7 +666,7 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
fclose(stdout);
ksft_print_msg("Unable to run specified benchmark\n");
- PARENT_EXIT();
+ parent_exit(ppid);
}
/*
@@ -700,21 +684,19 @@ int resctrl_val(const struct resctrl_test *test,
const char * const *benchmark_cmd,
struct resctrl_val_param *param)
{
- char *resctrl_val = param->resctrl_val;
- unsigned long bw_resc_start = 0;
struct sigaction sigact;
int ret = 0, pipefd[2];
char pipe_message = 0;
union sigval value;
+ int domain_id;
if (strcmp(param->filename, "") == 0)
sprintf(param->filename, "stdio");
- if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
- !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
- ret = validate_bw_report_request(param->bw_report);
- if (ret)
- return ret;
+ ret = get_domain_id(test->resource, uparams->cpu, &domain_id);
+ if (ret < 0) {
+ ksft_print_msg("Could not get domain ID\n");
+ return ret;
}
/*
@@ -755,7 +737,7 @@ int resctrl_val(const struct resctrl_test *test,
/* Register for "SIGUSR1" signal from parent */
if (sigaction(SIGUSR1, &sigact, NULL)) {
ksft_perror("Can't register child for signal");
- PARENT_EXIT();
+ parent_exit(ppid);
}
/* Tell parent that child is ready */
@@ -773,10 +755,10 @@ int resctrl_val(const struct resctrl_test *test,
sigsuspend(&sigact.sa_mask);
ksft_perror("Child is done");
- PARENT_EXIT();
+ parent_exit(ppid);
}
- ksft_print_msg("Benchmark PID: %d\n", bm_pid);
+ ksft_print_msg("Benchmark PID: %d\n", (int)bm_pid);
/*
* The cast removes constness but nothing mutates benchmark_cmd within
@@ -792,22 +774,15 @@ int resctrl_val(const struct resctrl_test *test,
goto out;
/* Write benchmark to specified control&monitoring grp in resctrl FS */
- ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
- resctrl_val);
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp);
if (ret)
goto out;
- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
- !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
- ret = initialize_mem_bw_imc();
+ if (param->init) {
+ ret = param->init(param, domain_id);
if (ret)
goto out;
-
- initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
- uparams->cpu, resctrl_val);
- } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
- initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
- uparams->cpu, resctrl_val);
+ }
/* Parent waits for child to be ready. */
close(pipefd[1]);
@@ -841,17 +816,9 @@ int resctrl_val(const struct resctrl_test *test,
if (ret < 0)
break;
- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
- !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
- ret = measure_vals(uparams, param, &bw_resc_start);
- if (ret)
- break;
- } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
- sleep(1);
- ret = measure_llc_resctrl(param->filename, bm_pid);
- if (ret)
- break;
- }
+ ret = param->measure(uparams, param, bm_pid);
+ if (ret)
+ break;
}
out:
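As a standalone illustration of the counter-delta pattern measure_mem_bw() now follows (open the counter file once, read a start value, rewind, read an end value after the one-second window, scale by MB), here is a hedged sketch; the /tmp/counter path is a placeholder, not the selftest's mbm_local_bytes path:

#include <stdio.h>
#include <unistd.h>

#define MB (1024 * 1024)

/* Read one unsigned long from an already-open counter file. */
static int read_counter(FILE *fp, unsigned long *val)
{
	return fscanf(fp, "%lu\n", val) <= 0 ? -1 : 0;
}

int main(void)
{
	unsigned long start, end;
	FILE *fp = fopen("/tmp/counter", "r");	/* placeholder path */

	if (!fp)
		return 1;
	if (read_counter(fp, &start))
		goto err;
	rewind(fp);		/* re-read the same file after the window */
	sleep(1);		/* one-second measurement window */
	if (read_counter(fp, &end))
		goto err;
	fclose(fp);
	printf("delta: %lu MB\n", (end - start) / MB);
	return 0;
err:
	fclose(fp);
	return 1;
}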
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index 1cade75176eb..250c320349a7 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -456,6 +456,9 @@ int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity)
* @grp: Full path and name of the group
* @parent_grp: Full path and name of the parent group
*
+ * Creates a group @grp_name if it does not exist yet. If @grp_name is NULL,
+ * it is interpreted as the root group which always results in success.
+ *
* Return: 0 on success, < 0 on error.
*/
static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
@@ -464,12 +467,7 @@ static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
struct dirent *ep;
DIR *dp;
- /*
- * At this point, we are guaranteed to have resctrl FS mounted and if
- * length of grp_name == 0, it means, user wants to use root con_mon
- * grp, so do nothing
- */
- if (strlen(grp_name) == 0)
+ if (!grp_name)
return 0;
/* Check if requested grp exists or not */
@@ -508,7 +506,7 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
return -1;
}
- if (fprintf(fp, "%d\n", pid) < 0) {
+ if (fprintf(fp, "%d\n", (int)pid) < 0) {
ksft_print_msg("Failed to write pid to tasks file\n");
fclose(fp);
@@ -524,7 +522,6 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
* @bm_pid: PID that should be written
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
- * @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
*
* If a con_mon grp is requested, create it and write pid to it, otherwise
* write pid to root con_mon grp.
@@ -534,14 +531,13 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
*
* Return: 0 on success, < 0 on error.
*/
-int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
- char *resctrl_val)
+int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp)
{
char controlgroup[128], monitorgroup[512], monitorgroup_p[256];
char tasks[1024];
int ret = 0;
- if (strlen(ctrlgrp))
+ if (ctrlgrp)
sprintf(controlgroup, "%s/%s", RESCTRL_PATH, ctrlgrp);
else
sprintf(controlgroup, "%s", RESCTRL_PATH);
@@ -555,22 +551,19 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
if (ret)
goto out;
- /* Create mon grp and write pid into it for "mbm" and "cmt" test */
- if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)) ||
- !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
- if (strlen(mongrp)) {
- sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
- sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
- ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
- if (ret)
- goto out;
-
- sprintf(tasks, "%s/mon_groups/%s/tasks",
- controlgroup, mongrp);
- ret = write_pid_to_tasks(tasks, bm_pid);
- if (ret)
- goto out;
- }
+	/* Create the monitor group and write pid into it if the group is used */
+ if (mongrp) {
+ sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
+ sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
+ ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
+ if (ret)
+ goto out;
+
+ sprintf(tasks, "%s/mon_groups/%s/tasks",
+ controlgroup, mongrp);
+ ret = write_pid_to_tasks(tasks, bm_pid);
+ if (ret)
+ goto out;
}
out:
@@ -593,7 +586,8 @@ out:
*
* Return: 0 on success, < 0 on error.
*/
-int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource)
+int write_schemata(const char *ctrlgrp, char *schemata, int cpu_no,
+ const char *resource)
{
char controlgroup[1024], reason[128], schema[1024] = {};
int domain_id, fd, schema_len, ret = 0;
@@ -611,7 +605,7 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resour
goto out;
}
- if (strlen(ctrlgrp) != 0)
+ if (ctrlgrp)
sprintf(controlgroup, "%s/%s/schemata", RESCTRL_PATH, ctrlgrp);
else
sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
@@ -837,22 +831,21 @@ int filter_dmesg(void)
return 0;
}
-int validate_bw_report_request(char *bw_report)
+const char *get_bw_report_type(const char *bw_report)
{
if (strcmp(bw_report, "reads") == 0)
- return 0;
+ return bw_report;
if (strcmp(bw_report, "writes") == 0)
- return 0;
+ return bw_report;
if (strcmp(bw_report, "nt-writes") == 0) {
- strcpy(bw_report, "writes");
- return 0;
+ return "writes";
}
if (strcmp(bw_report, "total") == 0)
- return 0;
+ return bw_report;
- fprintf(stderr, "Requested iMC B/W report type unavailable\n");
+ fprintf(stderr, "Requested iMC bandwidth report type unavailable\n");
- return -1;
+ return NULL;
}
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
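Note that get_bw_report_type() now returns a canonical, constant string instead of editing the caller's buffer: "nt-writes" maps to "writes" and an unknown type yields NULL. A hedged usage sketch, relying only on the prototypes shown earlier in this series (measure_once() itself is not a function in the patch):

static int measure_once(const struct user_params *uparams,
			struct resctrl_val_param *param, pid_t bm_pid)
{
	const char *type = get_bw_report_type("nt-writes");	/* canonicalized to "writes" */

	if (!type)
		return -1;	/* unknown type; error already printed to stderr */
	return measure_mem_bw(uparams, param, bm_pid, type);
}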
diff --git a/tools/testing/selftests/ring-buffer/Makefile b/tools/testing/selftests/ring-buffer/Makefile
index 627c5fa6d1ab..23605782639e 100644
--- a/tools/testing/selftests/ring-buffer/Makefile
+++ b/tools/testing/selftests/ring-buffer/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -Wl,-no-as-needed -Wall
CFLAGS += $(KHDR_INCLUDES)
-CFLAGS += -D_GNU_SOURCE
TEST_GEN_PROGS = map_test
diff --git a/tools/testing/selftests/riscv/mm/Makefile b/tools/testing/selftests/riscv/mm/Makefile
index c333263f2b27..4664ed79e20b 100644
--- a/tools/testing/selftests/riscv/mm/Makefile
+++ b/tools/testing/selftests/riscv/mm/Makefile
@@ -3,7 +3,7 @@
# Originally tools/testing/arm64/abi/Makefile
# Additional include paths needed by kselftest.h and local headers
-CFLAGS += -D_GNU_SOURCE -std=gnu99 -I.
+CFLAGS += -std=gnu99 -I.
TEST_GEN_FILES := mmap_default mmap_bottomup
diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c
index 27668fb3b6d0..895177f6bf4c 100644
--- a/tools/testing/selftests/riscv/vector/vstate_prctl.c
+++ b/tools/testing/selftests/riscv/vector/vstate_prctl.c
@@ -88,16 +88,16 @@ int main(void)
return -2;
}
- if (!(pair.value & RISCV_HWPROBE_IMA_V)) {
+ if (!(pair.value & RISCV_HWPROBE_EXT_ZVE32X)) {
rc = prctl(PR_RISCV_V_GET_CONTROL);
if (rc != -1 || errno != EINVAL) {
- ksft_test_result_fail("GET_CONTROL should fail on kernel/hw without V\n");
+ ksft_test_result_fail("GET_CONTROL should fail on kernel/hw without ZVE32X\n");
return -3;
}
rc = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
if (rc != -1 || errno != EINVAL) {
- ksft_test_result_fail("GET_CONTROL should fail on kernel/hw without V\n");
+ ksft_test_result_fail("SET_CONTROL should fail on kernel/hw without ZVE32X\n");
return -4;
}
diff --git a/tools/testing/selftests/sched/cs_prctl_test.c b/tools/testing/selftests/sched/cs_prctl_test.c
index 62fba7356af2..52d97fae4dbd 100644
--- a/tools/testing/selftests/sched/cs_prctl_test.c
+++ b/tools/testing/selftests/sched/cs_prctl_test.c
@@ -42,11 +42,11 @@ static pid_t gettid(void)
#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE 62
-# define PR_SCHED_CORE_GET 0
-# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
-# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
-# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
-# define PR_SCHED_CORE_MAX 4
+#define PR_SCHED_CORE_GET 0
+#define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+#define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+#define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
+#define PR_SCHED_CORE_MAX 4
#endif
#define MAX_PROCESSES 128
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 783ebce8c4de..e3f97f90d8db 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -3954,6 +3954,60 @@ TEST(user_notification_filter_empty)
EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
}
+TEST(user_ioctl_notification_filter_empty)
+{
+ pid_t pid;
+ long ret;
+ int status, p[2];
+ struct __clone_args args = {
+ .flags = CLONE_FILES,
+ .exit_signal = SIGCHLD,
+ };
+ struct seccomp_notif req = {};
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ if (__NR_clone3 < 0)
+ SKIP(return, "Test not built with clone3 support");
+
+ ASSERT_EQ(0, pipe(p));
+
+ pid = sys_clone3(&args, sizeof(args));
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int listener;
+
+ listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ if (listener < 0)
+ _exit(EXIT_FAILURE);
+
+ if (dup2(listener, 200) != 200)
+ _exit(EXIT_FAILURE);
+ close(p[1]);
+ close(listener);
+ sleep(1);
+
+ _exit(EXIT_SUCCESS);
+ }
+ if (read(p[0], &status, 1) != 0)
+ _exit(EXIT_SUCCESS);
+ close(p[0]);
+ /*
+ * The seccomp filter has become unused so we should be notified once
+ * the kernel gets around to cleaning up task struct.
+ */
+ EXPECT_EQ(ioctl(200, SECCOMP_IOCTL_NOTIF_RECV, &req), -1);
+ EXPECT_EQ(errno, ENOENT);
+
+ EXPECT_EQ(waitpid(pid, &status, 0), pid);
+ EXPECT_EQ(true, WIFEXITED(status));
+ EXPECT_EQ(0, WEXITSTATUS(status));
+}
+
static void *do_thread(void *data)
{
return NULL;
@@ -4755,6 +4809,83 @@ TEST(user_notification_wait_killable_fatal)
EXPECT_EQ(SIGTERM, WTERMSIG(status));
}
+struct tsync_vs_thread_leader_args {
+ pthread_t leader;
+};
+
+static void *tsync_vs_dead_thread_leader_sibling(void *_args)
+{
+ struct sock_filter allow_filter[] = {
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+ };
+ struct sock_fprog allow_prog = {
+ .len = (unsigned short)ARRAY_SIZE(allow_filter),
+ .filter = allow_filter,
+ };
+ struct tsync_vs_thread_leader_args *args = _args;
+ void *retval;
+ long ret;
+
+ ret = pthread_join(args->leader, &retval);
+ if (ret)
+ exit(1);
+ if (retval != _args)
+ exit(2);
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &allow_prog);
+ if (ret)
+ exit(3);
+
+ exit(0);
+}
+
+/*
+ * Ensure that a dead thread leader doesn't prevent installing new filters with
+ * SECCOMP_FILTER_FLAG_TSYNC from other threads.
+ */
+TEST(tsync_vs_dead_thread_leader)
+{
+ int status;
+ pid_t pid;
+ long ret;
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ struct sock_filter allow_filter[] = {
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+ };
+ struct sock_fprog allow_prog = {
+ .len = (unsigned short)ARRAY_SIZE(allow_filter),
+ .filter = allow_filter,
+ };
+ struct tsync_vs_thread_leader_args *args;
+ pthread_t sibling;
+
+ args = malloc(sizeof(*args));
+ ASSERT_NE(NULL, args);
+ args->leader = pthread_self();
+
+ ret = pthread_create(&sibling, NULL,
+ tsync_vs_dead_thread_leader_sibling, args);
+ ASSERT_EQ(0, ret);
+
+ /* Install a new filter just to the leader thread. */
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog);
+ ASSERT_EQ(0, ret);
+ pthread_exit(args);
+ exit(1);
+ }
+
+ EXPECT_EQ(pid, waitpid(pid, &status, 0));
+ EXPECT_EQ(0, status);
+}
+
/*
* TODO:
* - expand NNP testing
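The tsync_vs_dead_thread_leader test hinges on SECCOMP_FILTER_FLAG_TSYNC attaching a filter to every thread in the process. A minimal, self-contained sketch of that call sequence (allow-everything filter, NO_NEW_PRIVS set first), separate from the harness above:

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct sock_filter allow_filter[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog allow_prog = {
		.len = sizeof(allow_filter) / sizeof(allow_filter[0]),
		.filter = allow_filter,
	};

	/* Required to install a filter without CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;
	/* TSYNC applies the filter to all threads, not just the caller. */
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		    SECCOMP_FILTER_FLAG_TSYNC, &allow_prog))
		return 1;
	puts("filter synchronized to all threads");
	return 0;
}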
diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile
index 867f88ce2570..03b5e13b872b 100644
--- a/tools/testing/selftests/sgx/Makefile
+++ b/tools/testing/selftests/sgx/Makefile
@@ -12,7 +12,7 @@ OBJCOPY := $(CROSS_COMPILE)objcopy
endif
INCLUDES := -I$(top_srcdir)/tools/include
-HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC
+HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC $(CFLAGS)
HOST_LDFLAGS := -z noexecstack -lcrypto
ENCL_CFLAGS += -Wall -Werror -static-pie -nostdlib -ffreestanding -fPIE \
-fno-stack-protector -mrdrnd $(INCLUDES)
diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
index ea9bdf3a90b1..09da8f1011ce 100644
--- a/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
@@ -8,7 +8,7 @@ register unsigned long sp asm("sp");
register unsigned long sp asm("esp");
#elif __loongarch64
register unsigned long sp asm("$sp");
-#elif __ppc__
+#elif __powerpc__
register unsigned long sp asm("r1");
#elif __s390x__
register unsigned long sp asm("%15");
diff --git a/tools/testing/selftests/timens/exec.c b/tools/testing/selftests/timens/exec.c
index e40dc5be2f66..d12ff955de0d 100644
--- a/tools/testing/selftests/timens/exec.c
+++ b/tools/testing/selftests/timens/exec.c
@@ -30,7 +30,7 @@ int main(int argc, char *argv[])
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec) > 5)
+ if (labs(tst.tv_sec - now.tv_sec) > 5)
return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
}
return 0;
@@ -50,7 +50,7 @@ int main(int argc, char *argv[])
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec) > 5)
+ if (labs(tst.tv_sec - now.tv_sec) > 5)
return pr_fail("%ld %ld\n",
now.tv_sec, tst.tv_sec);
}
@@ -70,7 +70,7 @@ int main(int argc, char *argv[])
/* Check that a child process is in the new timens. */
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
+ if (labs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
return pr_fail("%ld %ld\n",
now.tv_sec + OFFSET, tst.tv_sec);
}
diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c
index 5e7f0051bd7b..5b939f59dfa4 100644
--- a/tools/testing/selftests/timens/timer.c
+++ b/tools/testing/selftests/timens/timer.c
@@ -56,7 +56,7 @@ int run_test(int clockid, struct timespec now)
return pr_perror("timerfd_gettime");
elapsed = new_value.it_value.tv_sec;
- if (abs(elapsed - 3600) > 60) {
+ if (llabs(elapsed - 3600) > 60) {
ksft_test_result_fail("clockid: %d elapsed: %lld\n",
clockid, elapsed);
return 1;
diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c
index 9edd43d6b2c1..a4196bbd6e33 100644
--- a/tools/testing/selftests/timens/timerfd.c
+++ b/tools/testing/selftests/timens/timerfd.c
@@ -61,7 +61,7 @@ int run_test(int clockid, struct timespec now)
return pr_perror("timerfd_gettime(%d)", clockid);
elapsed = new_value.it_value.tv_sec;
- if (abs(elapsed - 3600) > 60) {
+ if (llabs(elapsed - 3600) > 60) {
ksft_test_result_fail("clockid: %d elapsed: %lld\n",
clockid, elapsed);
return 1;
diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c
index beb7614941fb..5b8907bf451d 100644
--- a/tools/testing/selftests/timens/vfork_exec.c
+++ b/tools/testing/selftests/timens/vfork_exec.c
@@ -32,7 +32,7 @@ static void *tcheck(void *_args)
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now->tv_sec) > 5) {
+ if (labs(tst.tv_sec - now->tv_sec) > 5) {
pr_fail("%s: in-thread: unexpected value: %ld (%ld)\n",
args->tst_name, tst.tv_sec, now->tv_sec);
return (void *)1UL;
@@ -64,7 +64,7 @@ static int check(char *tst_name, struct timespec *now)
for (i = 0; i < 2; i++) {
_gettime(CLOCK_MONOTONIC, &tst, i);
- if (abs(tst.tv_sec - now->tv_sec) > 5)
+ if (labs(tst.tv_sec - now->tv_sec) > 5)
return pr_fail("%s: unexpected value: %ld (%ld)\n",
tst_name, tst.tv_sec, now->tv_sec);
}
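The abs()-to-labs() conversions in these timens tests matter because tv_sec is a long: int abs() silently truncates the argument on LP64, so a large offset could compare as small. A tiny hedged illustration (assumes 64-bit long):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long delta = 0x100000001L;	/* 4294967297; assumes LP64 */

	printf("abs:  %d\n", abs((int)delta));	/* prints 1: high bits lost */
	printf("labs: %ld\n", labs(delta));	/* prints 4294967297 */
	return 0;
}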
diff --git a/tools/testing/selftests/timers/rtcpie.c b/tools/testing/selftests/timers/rtcpie.c
index 4ef2184f1558..7c07edd0d450 100644
--- a/tools/testing/selftests/timers/rtcpie.c
+++ b/tools/testing/selftests/timers/rtcpie.c
@@ -29,7 +29,7 @@ static const char default_rtc[] = "/dev/rtc0";
int main(int argc, char **argv)
{
- int i, fd, retval, irqcount = 0;
+ int i, fd, retval;
unsigned long tmp, data, old_pie_rate;
const char *rtc = default_rtc;
struct timeval start, end, diff;
@@ -120,7 +120,6 @@ int main(int argc, char **argv)
fprintf(stderr, " %d",i);
fflush(stderr);
- irqcount++;
}
/* Disable periodic interrupts */
diff --git a/tools/testing/selftests/tmpfs/Makefile b/tools/testing/selftests/tmpfs/Makefile
index aa11ccc92e5b..3be931e1193f 100644
--- a/tools/testing/selftests/tmpfs/Makefile
+++ b/tools/testing/selftests/tmpfs/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -Wall -O2
-CFLAGS += -D_GNU_SOURCE
TEST_GEN_PROGS :=
TEST_GEN_PROGS += bug-link-o-tmpfile
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
index d53a4d8008f9..98d8ba2afa00 100644
--- a/tools/testing/selftests/vDSO/Makefile
+++ b/tools/testing/selftests/vDSO/Makefile
@@ -1,35 +1,30 @@
# SPDX-License-Identifier: GPL-2.0
-include ../lib.mk
-
uname_M := $(shell uname -m 2>/dev/null || echo not)
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
-TEST_GEN_PROGS := $(OUTPUT)/vdso_test_gettimeofday $(OUTPUT)/vdso_test_getcpu
-TEST_GEN_PROGS += $(OUTPUT)/vdso_test_abi
-TEST_GEN_PROGS += $(OUTPUT)/vdso_test_clock_getres
+TEST_GEN_PROGS := vdso_test_gettimeofday
+TEST_GEN_PROGS += vdso_test_getcpu
+TEST_GEN_PROGS += vdso_test_abi
+TEST_GEN_PROGS += vdso_test_clock_getres
ifeq ($(ARCH),$(filter $(ARCH),x86 x86_64))
-TEST_GEN_PROGS += $(OUTPUT)/vdso_standalone_test_x86
+TEST_GEN_PROGS += vdso_standalone_test_x86
endif
-TEST_GEN_PROGS += $(OUTPUT)/vdso_test_correctness
+TEST_GEN_PROGS += vdso_test_correctness
CFLAGS := -std=gnu99
-CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
-LDFLAGS_vdso_test_correctness := -ldl
+
ifeq ($(CONFIG_X86_32),y)
LDLIBS += -lgcc_s
endif
-all: $(TEST_GEN_PROGS)
+include ../lib.mk
$(OUTPUT)/vdso_test_gettimeofday: parse_vdso.c vdso_test_gettimeofday.c
$(OUTPUT)/vdso_test_getcpu: parse_vdso.c vdso_test_getcpu.c
$(OUTPUT)/vdso_test_abi: parse_vdso.c vdso_test_abi.c
$(OUTPUT)/vdso_test_clock_getres: vdso_test_clock_getres.c
+
$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
- $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
- vdso_standalone_test_x86.c parse_vdso.c \
- -o $@
+$(OUTPUT)/vdso_standalone_test_x86: CFLAGS +=-nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
+
$(OUTPUT)/vdso_test_correctness: vdso_test_correctness.c
- $(CC) $(CFLAGS) \
- vdso_test_correctness.c \
- -o $@ \
- $(LDFLAGS_vdso_test_correctness)
+$(OUTPUT)/vdso_test_correctness: LDFLAGS += -ldl
diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
index 413f75620a35..4ae417372e9e 100644
--- a/tools/testing/selftests/vDSO/parse_vdso.c
+++ b/tools/testing/selftests/vDSO/parse_vdso.c
@@ -55,14 +55,20 @@ static struct vdso_info
ELF(Verdef) *verdef;
} vdso_info;
-/* Straight from the ELF specification. */
-static unsigned long elf_hash(const unsigned char *name)
+/*
+ * Straight from the ELF specification...and then tweaked slightly, in order to
+ * avoid a few clang warnings.
+ */
+static unsigned long elf_hash(const char *name)
{
unsigned long h = 0, g;
- while (*name)
+ const unsigned char *uch_name = (const unsigned char *)name;
+
+ while (*uch_name)
{
- h = (h << 4) + *name++;
- if (g = h & 0xf0000000)
+ h = (h << 4) + *uch_name++;
+ g = h & 0xf0000000;
+ if (g)
h ^= g >> 24;
h &= ~g;
}
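The tweak keeps the hash identical while silencing clang (presumably -Wparentheses for the assignment used as a condition, plus the char/unsigned char pointer mismatch at call sites). A hedged standalone copy of the adjusted function, runnable outside the vDSO parser:

#include <stdio.h>

/* Classic ELF symbol hash; assignment split out of the if () condition. */
static unsigned long elf_hash(const char *name)
{
	const unsigned char *uch_name = (const unsigned char *)name;
	unsigned long h = 0, g;

	while (*uch_name) {
		h = (h << 4) + *uch_name++;
		g = h & 0xf0000000;
		if (g)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

int main(void)
{
	printf("%lu\n", elf_hash("__vdso_gettimeofday"));
	return 0;
}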
diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
index 8a44ff973ee1..27f6fdf11969 100644
--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
@@ -18,7 +18,7 @@
#include "parse_vdso.h"
-/* We need a libc functions... */
+/* We need some libc functions... */
int strcmp(const char *a, const char *b)
{
/* This implementation is buggy: it never returns -1. */
@@ -34,6 +34,20 @@ int strcmp(const char *a, const char *b)
return 0;
}
+/*
+ * The clang build needs this, although gcc does not.
+ * Stolen from lib/string.c.
+ */
+void *memcpy(void *dest, const void *src, size_t count)
+{
+ char *tmp = dest;
+ const char *s = src;
+
+ while (count--)
+ *tmp++ = *s++;
+ return dest;
+}
+
/* ...and two syscalls. This is x86-specific. */
static inline long x86_syscall3(long nr, long a0, long a1, long a2)
{
@@ -70,7 +84,7 @@ void to_base10(char *lastdig, time_t n)
}
}
-__attribute__((externally_visible)) void c_main(void **stack)
+void c_main(void **stack)
{
/* Parse the stack */
long argc = (long)*stack;
diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
index e95bd56b332f..35856b11c143 100644
--- a/tools/testing/selftests/wireguard/qemu/Makefile
+++ b/tools/testing/selftests/wireguard/qemu/Makefile
@@ -109,9 +109,9 @@ KERNEL_ARCH := x86_64
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
+QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
else
-QEMU_MACHINE := -cpu max -machine microvm -no-acpi
+QEMU_MACHINE := -cpu max -machine microvm,acpi=off
endif
else ifeq ($(ARCH),i686)
CHOST := i686-linux-musl
@@ -120,9 +120,9 @@ KERNEL_ARCH := x86
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
+QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
else
-QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
+QEMU_MACHINE := -cpu coreduo -machine microvm,acpi=off
endif
else ifeq ($(ARCH),mips64)
CHOST := mips64-linux-musl
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 0b872c0a42d2..5c8757a25998 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -40,6 +40,13 @@ CFLAGS := -O2 -g -std=gnu99 -pthread -Wall $(KHDR_INCLUDES)
# call32_from_64 in thunks.S uses absolute addresses.
ifeq ($(CAN_BUILD_WITH_NOPIE),1)
CFLAGS += -no-pie
+
+ifneq ($(LLVM),)
+# clang only wants to see -no-pie during linking. Here, we don't have a separate
+# linking stage, so a compiler warning is unavoidable without (wastefully)
+# restructuring the Makefile. Avoid this by simply disabling that warning.
+CFLAGS += -Wno-unused-command-line-argument
+endif
endif
define gen-target-rule-32
@@ -73,10 +80,10 @@ all_64: $(BINARIES_64)
EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64)
$(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h
- $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
+ $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $< $(EXTRA_FILES) -lrt -ldl -lm
$(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h
- $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
+ $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $< $(EXTRA_FILES) -lrt -ldl
# x86_64 users should be encouraged to install 32-bit libraries
ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),01)
@@ -100,10 +107,22 @@ warn_32bit_failure:
exit 0;
endif
-# Some tests have additional dependencies.
-$(OUTPUT)/sysret_ss_attrs_64: thunks.S
-$(OUTPUT)/ptrace_syscall_32: raw_syscall_helper_32.S
-$(OUTPUT)/test_syscall_vdso_32: thunks_32.S
+# Add an additional file to the source file list for a given target, and also
+# add a Makefile dependency on that same file. However, do these separately, so
+# that the compiler invocation ("$(CC) file1.c file2.S") is not combined with
+# the dependencies ("header3.h"), because clang, unlike gcc, will not accept
+# header files as an input to the compiler invocation.
+define extra-files
+$(OUTPUT)/$(1): EXTRA_FILES := $(2)
+$(OUTPUT)/$(1): $(2)
+endef
+
+$(eval $(call extra-files,sysret_ss_attrs_64,thunks.S))
+$(eval $(call extra-files,ptrace_syscall_32,raw_syscall_helper_32.S))
+$(eval $(call extra-files,test_syscall_vdso_32,thunks_32.S))
+$(eval $(call extra-files,fsgsbase_restore_64,clang_helpers_64.S))
+$(eval $(call extra-files,fsgsbase_restore_32,clang_helpers_32.S))
+$(eval $(call extra-files,sysret_rip_64,clang_helpers_64.S))
# check_initial_reg_state is special: it needs a custom entry, and it
# needs to be static so that its interpreter doesn't destroy its initial
diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c
index 95aad6d8849b..1fdf35a4d7f6 100644
--- a/tools/testing/selftests/x86/amx.c
+++ b/tools/testing/selftests/x86/amx.c
@@ -39,16 +39,6 @@ struct xsave_buffer {
};
};
-static inline uint64_t xgetbv(uint32_t index)
-{
- uint32_t eax, edx;
-
- asm volatile("xgetbv;"
- : "=a" (eax), "=d" (edx)
- : "c" (index));
- return eax + ((uint64_t)edx << 32);
-}
-
static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
{
uint32_t rfbm_lo = rfbm;
@@ -164,12 +154,6 @@ static inline void clear_xstate_header(struct xsave_buffer *buffer)
memset(&buffer->header, 0, sizeof(buffer->header));
}
-static inline uint64_t get_xstatebv(struct xsave_buffer *buffer)
-{
- /* XSTATE_BV is at the beginning of the header: */
- return *(uint64_t *)&buffer->header;
-}
-
static inline void set_xstatebv(struct xsave_buffer *buffer, uint64_t bv)
{
/* XSTATE_BV is at the beginning of the header: */
diff --git a/tools/testing/selftests/x86/clang_helpers_32.S b/tools/testing/selftests/x86/clang_helpers_32.S
new file mode 100644
index 000000000000..dc16271bac70
--- /dev/null
+++ b/tools/testing/selftests/x86/clang_helpers_32.S
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * 32-bit assembly helpers for asm operations that lack support in both gcc and
+ * clang. For example, clang asm does not support segment prefixes.
+ */
+.global dereference_seg_base
+dereference_seg_base:
+ mov %fs:(0), %eax
+ ret
+
+.section .note.GNU-stack,"",%progbits
diff --git a/tools/testing/selftests/x86/clang_helpers_64.S b/tools/testing/selftests/x86/clang_helpers_64.S
new file mode 100644
index 000000000000..185a69dbf39c
--- /dev/null
+++ b/tools/testing/selftests/x86/clang_helpers_64.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * 64-bit assembly helpers for asm operations that lack support in both gcc and
+ * clang. For example, clang asm does not support segment prefixes.
+ */
+.global dereference_seg_base
+
+dereference_seg_base:
+ mov %gs:(0), %rax
+ ret
+
+.global test_page
+.global test_syscall_insn
+
+.pushsection ".text", "ax"
+.balign 4096
+test_page: .globl test_page
+ .fill 4094,1,0xcc
+
+test_syscall_insn:
+ syscall
+
+.ifne . - test_page - 4096
+ .error "test page is not one page long"
+.endif
+.popsection
+
+.section .note.GNU-stack,"",%progbits
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index 8c780cce941d..50cf32de6313 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -109,11 +109,6 @@ static inline void wrgsbase(unsigned long gsbase)
asm volatile("wrgsbase %0" :: "r" (gsbase) : "memory");
}
-static inline void wrfsbase(unsigned long fsbase)
-{
- asm volatile("wrfsbase %0" :: "r" (fsbase) : "memory");
-}
-
enum which_base { FS, GS };
static unsigned long read_base(enum which_base which)
@@ -212,7 +207,6 @@ static void mov_0_gs(unsigned long initial_base, bool schedule)
}
static volatile unsigned long remote_base;
-static volatile bool remote_hard_zero;
static volatile unsigned int ftx;
/*
diff --git a/tools/testing/selftests/x86/fsgsbase_restore.c b/tools/testing/selftests/x86/fsgsbase_restore.c
index 6fffadc51579..224058c1e4b2 100644
--- a/tools/testing/selftests/x86/fsgsbase_restore.c
+++ b/tools/testing/selftests/x86/fsgsbase_restore.c
@@ -39,12 +39,11 @@
# define SEG "%fs"
#endif
-static unsigned int dereference_seg_base(void)
-{
- int ret;
- asm volatile ("mov %" SEG ":(0), %0" : "=rm" (ret));
- return ret;
-}
+/*
+ * Defined in clang_helpers_[32|64].S, because unlike gcc, clang inline asm does
+ * not support segmentation prefixes.
+ */
+unsigned int dereference_seg_base(void);
static void init_seg(void)
{
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index 5d7961a5f7f6..0b75b29f794b 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -487,7 +487,7 @@ static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
greg_t asm_ss = ctx->uc_mcontext.gregs[REG_CX];
if (asm_ss != sig_ss && sig == SIGTRAP) {
/* Sanity check failure. */
- printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n",
+ printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %x, ax = %llx\n",
ss, *ssptr(ctx), (unsigned long long)asm_ss);
nerrs++;
}
diff --git a/tools/testing/selftests/x86/syscall_arg_fault.c b/tools/testing/selftests/x86/syscall_arg_fault.c
index 461fa41a4d02..48ab065a76f9 100644
--- a/tools/testing/selftests/x86/syscall_arg_fault.c
+++ b/tools/testing/selftests/x86/syscall_arg_fault.c
@@ -29,7 +29,6 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
err(1, "sigaction");
}
-static volatile sig_atomic_t sig_traps;
static sigjmp_buf jmpbuf;
static volatile sig_atomic_t n_errs;
diff --git a/tools/testing/selftests/x86/sysret_rip.c b/tools/testing/selftests/x86/sysret_rip.c
index 84d74be1d902..b30de9aaa6d4 100644
--- a/tools/testing/selftests/x86/sysret_rip.c
+++ b/tools/testing/selftests/x86/sysret_rip.c
@@ -22,21 +22,13 @@
#include <sys/mman.h>
#include <assert.h>
-
-asm (
- ".pushsection \".text\", \"ax\"\n\t"
- ".balign 4096\n\t"
- "test_page: .globl test_page\n\t"
- ".fill 4094,1,0xcc\n\t"
- "test_syscall_insn:\n\t"
- "syscall\n\t"
- ".ifne . - test_page - 4096\n\t"
- ".error \"test page is not one page long\"\n\t"
- ".endif\n\t"
- ".popsection"
- );
-
+/*
+ * These items are in clang_helpers_64.S, in order to avoid clang inline asm
+ * limitations:
+ */
+void test_syscall_insn(void);
extern const char test_page[];
+
static void const *current_test_page_addr = test_page;
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
diff --git a/tools/testing/selftests/x86/test_FISTTP.c b/tools/testing/selftests/x86/test_FISTTP.c
index 09789c0ce3e9..b9ae9d8cebcb 100644
--- a/tools/testing/selftests/x86/test_FISTTP.c
+++ b/tools/testing/selftests/x86/test_FISTTP.c
@@ -25,7 +25,7 @@ int test(void)
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fld1""\n"
- " fisttp res16""\n"
+ " fisttps res16""\n"
" fld1""\n"
" fisttpl res32""\n"
" fld1""\n"
@@ -45,7 +45,7 @@ int test(void)
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fldpi""\n"
- " fisttp res16""\n"
+ " fisttps res16""\n"
" fldpi""\n"
" fisttpl res32""\n"
" fldpi""\n"
@@ -66,7 +66,7 @@ int test(void)
asm volatile ("\n"
" fldpi""\n"
" fchs""\n"
- " fisttp res16""\n"
+ " fisttps res16""\n"
" fldpi""\n"
" fchs""\n"
" fisttpl res32""\n"
@@ -88,7 +88,7 @@ int test(void)
feclearexcept(FE_DIVBYZERO|FE_INEXACT|FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW);
asm volatile ("\n"
" fldln2""\n"
- " fisttp res16""\n"
+ " fisttps res16""\n"
" fldln2""\n"
" fisttpl res32""\n"
" fldln2""\n"
diff --git a/tools/testing/selftests/x86/test_shadow_stack.c b/tools/testing/selftests/x86/test_shadow_stack.c
index ee909a7927f9..21af54d5f4ea 100644
--- a/tools/testing/selftests/x86/test_shadow_stack.c
+++ b/tools/testing/selftests/x86/test_shadow_stack.c
@@ -34,6 +34,7 @@
#include <sys/ptrace.h>
#include <sys/signal.h>
#include <linux/elf.h>
+#include <linux/perf_event.h>
/*
* Define the ABI defines if needed, so people can run the tests
@@ -734,6 +735,144 @@ int test_32bit(void)
return !segv_triggered;
}
+static int parse_uint_from_file(const char *file, const char *fmt)
+{
+ int err, ret;
+ FILE *f;
+
+ f = fopen(file, "re");
+ if (!f) {
+ err = -errno;
+ printf("failed to open '%s': %d\n", file, err);
+ return err;
+ }
+ err = fscanf(f, fmt, &ret);
+ if (err != 1) {
+ err = err == EOF ? -EIO : -errno;
+ printf("failed to parse '%s': %d\n", file, err);
+ fclose(f);
+ return err;
+ }
+ fclose(f);
+ return ret;
+}
+
+static int determine_uprobe_perf_type(void)
+{
+ const char *file = "/sys/bus/event_source/devices/uprobe/type";
+
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int determine_uprobe_retprobe_bit(void)
+{
+ const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
+
+ return parse_uint_from_file(file, "config:%d\n");
+}
+
+static ssize_t get_uprobe_offset(const void *addr)
+{
+ size_t start, end, base;
+ char buf[256];
+ bool found = false;
+ FILE *f;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!f)
+ return -errno;
+
+ while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) {
+ if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) {
+ found = true;
+ break;
+ }
+ }
+
+ fclose(f);
+
+ if (!found)
+ return -ESRCH;
+
+ return (uintptr_t)addr - start + base;
+}
+
+static __attribute__((noinline)) void uretprobe_trigger(void)
+{
+ asm volatile ("");
+}
+
+/*
+ * This test sets up a return uprobe, which is sensitive to shadow stack
+ * (it crashes without the extra fix). After executing the uretprobe we fail
+ * the test if we receive SIGSEGV; no crash means we're good.
+ *
+ * Helper functions above are borrowed from the bpf selftests.
+ */
+static int test_uretprobe(void)
+{
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ const char *file = "/proc/self/exe";
+ int bit, fd = 0, type, err = 1;
+ struct perf_event_attr attr;
+ struct sigaction sa = {};
+ ssize_t offset;
+
+ type = determine_uprobe_perf_type();
+ if (type < 0) {
+ if (type == -ENOENT)
+ printf("[SKIP]\tUretprobe test, uprobes are not available\n");
+ return 0;
+ }
+
+ offset = get_uprobe_offset(uretprobe_trigger);
+ if (offset < 0)
+ return 1;
+
+ bit = determine_uprobe_retprobe_bit();
+ if (bit < 0)
+ return 1;
+
+ sa.sa_sigaction = segv_gp_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &sa, NULL))
+ return 1;
+
+ /* Setup return uprobe through perf event interface. */
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
+ attr.type = type;
+ attr.config = 1 << bit;
+ attr.config1 = (__u64) (unsigned long) file;
+ attr.config2 = offset;
+
+ fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */, -1 /* cpu */,
+ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+ if (fd < 0)
+ goto out;
+
+ if (sigsetjmp(jmp_buffer, 1))
+ goto out;
+
+ ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK);
+
+ /*
+ * This either segfaults and goes through sigsetjmp above
+ * or succeeds and we're good.
+ */
+ uretprobe_trigger();
+
+ printf("[OK]\tUretprobe test\n");
+ err = 0;
+
+out:
+ ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
+ signal(SIGSEGV, SIG_DFL);
+ if (fd)
+ close(fd);
+ return err;
+}
+
void segv_handler_ptrace(int signum, siginfo_t *si, void *uc)
{
/* The SSP adjustment caused a segfault. */
@@ -926,6 +1065,12 @@ int main(int argc, char *argv[])
goto out;
}
+ if (test_uretprobe()) {
+ ret = 1;
+ printf("[FAIL]\turetprobe test\n");
+ goto out;
+ }
+
return ret;
out:
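The uretprobe test reuses the common kselftest trick of pairing a SIGSEGV handler with sigsetjmp() so a crash becomes a recoverable jump rather than killing the test. A minimal self-contained sketch of that guard pattern (the fault here is a deliberate NULL store, not the shadow-stack case):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf jmp_buffer;

static void segv_handler(int sig, siginfo_t *si, void *uc)
{
	siglongjmp(jmp_buffer, 1);	/* unwind back to the sigsetjmp() site */
}

int main(void)
{
	struct sigaction sa = {};

	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	if (sigaction(SIGSEGV, &sa, NULL))
		return 1;

	if (sigsetjmp(jmp_buffer, 1)) {
		printf("[OK]\tfault was caught and recovered\n");
		return 0;
	}

	*(volatile int *)0 = 1;		/* deliberately fault */
	printf("[FAIL]\tno fault occurred\n");
	return 1;
}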
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index d4c8e8d79d38..6de11b4df458 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -97,11 +97,6 @@ static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
return syscall(SYS_gettimeofday, tv, tz);
}
-static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
-{
- return syscall(SYS_clock_gettime, id, ts);
-}
-
static inline long sys_time(time_t *t)
{
return syscall(SYS_time, t);
@@ -252,7 +247,7 @@ static void test_getcpu(int cpu)
if (ret_sys == 0) {
if (cpu_sys != cpu)
- ksft_print_msg("syscall reported CPU %hu but should be %d\n",
+ ksft_print_msg("syscall reported CPU %u but should be %d\n",
cpu_sys, cpu);
have_node = true;
@@ -270,10 +265,10 @@ static void test_getcpu(int cpu)
if (cpu_vdso != cpu || node_vdso != node) {
if (cpu_vdso != cpu)
- ksft_print_msg("vDSO reported CPU %hu but should be %d\n",
+ ksft_print_msg("vDSO reported CPU %u but should be %d\n",
cpu_vdso, cpu);
if (node_vdso != node)
- ksft_print_msg("vDSO reported node %hu but should be %hu\n",
+ ksft_print_msg("vDSO reported node %u but should be %u\n",
node_vdso, node);
ksft_test_result_fail("Wrong values\n");
} else {
@@ -295,10 +290,10 @@ static void test_getcpu(int cpu)
if (cpu_vsys != cpu || node_vsys != node) {
if (cpu_vsys != cpu)
- ksft_print_msg("vsyscall reported CPU %hu but should be %d\n",
+ ksft_print_msg("vsyscall reported CPU %u but should be %d\n",
cpu_vsys, cpu);
if (node_vsys != node)
- ksft_print_msg("vsyscall reported node %hu but should be %hu\n",
+ ksft_print_msg("vsyscall reported node %u but should be %u\n",
node_vsys, node);
ksft_test_result_fail("Wrong values\n");
} else {
diff --git a/tools/testing/selftests/x86/vdso_restorer.c b/tools/testing/selftests/x86/vdso_restorer.c
index fe99f2434155..ac8d8e1e9805 100644
--- a/tools/testing/selftests/x86/vdso_restorer.c
+++ b/tools/testing/selftests/x86/vdso_restorer.c
@@ -92,4 +92,6 @@ int main()
printf("[FAIL]\t!SA_SIGINFO handler was not called\n");
nerrs++;
}
+
+ return nerrs;
}
diff --git a/tools/testing/vsock/Makefile b/tools/testing/vsock/Makefile
index a7f56a09ca9f..6e0b4e95e230 100644
--- a/tools/testing/vsock/Makefile
+++ b/tools/testing/vsock/Makefile
@@ -13,3 +13,16 @@ CFLAGS += -g -O2 -Werror -Wall -I. -I../../include -I../../../usr/include -Wno-p
clean:
${RM} *.o *.d vsock_test vsock_diag_test vsock_perf vsock_uring_test
-include *.d
+
+VSOCK_INSTALL_PATH ?=
+
+install: all
+ifdef VSOCK_INSTALL_PATH
+ mkdir -p $(VSOCK_INSTALL_PATH)
+ install -m 744 vsock_test $(VSOCK_INSTALL_PATH)
+ install -m 744 vsock_perf $(VSOCK_INSTALL_PATH)
+ install -m 744 vsock_diag_test $(VSOCK_INSTALL_PATH)
+ install -m 744 vsock_uring_test $(VSOCK_INSTALL_PATH)
+else
+ $(error Error: set VSOCK_INSTALL_PATH to use install)
+endif
diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
index 7be17d09f7e8..214e2c93fde0 100644
--- a/tools/tracing/rtla/src/osnoise_hist.c
+++ b/tools/tracing/rtla/src/osnoise_hist.c
@@ -374,6 +374,7 @@ osnoise_print_stats(struct osnoise_hist_params *params, struct osnoise_tool *too
{
struct osnoise_hist_data *data = tool->data;
struct trace_instance *trace = &tool->trace;
+ int has_samples = 0;
int bucket, cpu;
int total;
@@ -402,11 +403,25 @@ osnoise_print_stats(struct osnoise_hist_params *params, struct osnoise_tool *too
continue;
}
+ /* There are samples above the threshold */
+ has_samples = 1;
trace_seq_printf(trace->seq, "\n");
trace_seq_do_printf(trace->seq);
trace_seq_reset(trace->seq);
}
+ /*
+ * If no samples were recorded, skip calculations, print zeroed statistics
+ * and return.
+ */
+ if (!has_samples) {
+ trace_seq_reset(trace->seq);
+ trace_seq_printf(trace->seq, "over: 0\ncount: 0\nmin: 0\navg: 0\nmax: 0\n");
+ trace_seq_do_printf(trace->seq);
+ trace_seq_reset(trace->seq);
+ return;
+ }
+
if (!params->no_index)
trace_seq_printf(trace->seq, "over: ");
diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
index 07ba55d4ec06..f594a44df840 100644
--- a/tools/tracing/rtla/src/osnoise_top.c
+++ b/tools/tracing/rtla/src/osnoise_top.c
@@ -42,6 +42,7 @@ struct osnoise_top_params {
int hk_cpus;
int warmup;
int buffer_size;
+ int pretty_output;
cpu_set_t hk_cpu_set;
struct sched_attr sched_param;
struct trace_events *events;
@@ -163,7 +164,9 @@ static void osnoise_top_header(struct osnoise_tool *top)
get_duration(top->start_time, duration, sizeof(duration));
- trace_seq_printf(s, "\033[2;37;40m");
+ if (params->pretty_output)
+ trace_seq_printf(s, "\033[2;37;40m");
+
trace_seq_printf(s, " ");
if (params->mode == MODE_OSNOISE) {
@@ -174,12 +177,16 @@ static void osnoise_top_header(struct osnoise_tool *top)
}
trace_seq_printf(s, " ");
- trace_seq_printf(s, "\033[0;0;0m");
+
+ if (params->pretty_output)
+ trace_seq_printf(s, "\033[0;0;0m");
trace_seq_printf(s, "\n");
trace_seq_printf(s, "duration: %9s | time is in us\n", duration);
- trace_seq_printf(s, "\033[2;30;47m");
+ if (params->pretty_output)
+ trace_seq_printf(s, "\033[2;30;47m");
+
trace_seq_printf(s, "CPU Period Runtime ");
trace_seq_printf(s, " Noise ");
trace_seq_printf(s, " %% CPU Aval ");
@@ -192,7 +199,8 @@ static void osnoise_top_header(struct osnoise_tool *top)
trace_seq_printf(s, " IRQ Softirq Thread");
eol:
- trace_seq_printf(s, "\033[0;0;0m");
+ if (params->pretty_output)
+ trace_seq_printf(s, "\033[0;0;0m");
trace_seq_printf(s, "\n");
}
@@ -619,6 +627,9 @@ osnoise_top_apply_config(struct osnoise_tool *tool, struct osnoise_top_params *p
auto_house_keeping(&params->monitored_cpus);
}
+ if (isatty(1) && !params->quiet)
+ params->pretty_output = 1;
+
return 0;
out_err:
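The new pretty_output flag simply gates the ANSI color escapes on stdout being a terminal, so piped or redirected rtla output stays plain. The check in isolation, as a hedged sketch:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pretty = isatty(STDOUT_FILENO);	/* nonzero only on a terminal */

	if (pretty)
		printf("\033[2;30;47m");	/* header colors, as in osnoise_top_header() */
	printf("CPU Period Runtime Noise\n");
	if (pretty)
		printf("\033[0;0;0m");
	return 0;
}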
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index 98ff808d6f0c..43d3a6aa1dcf 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -139,7 +139,7 @@ static int parallel_test(u64 features,
bool fast_vringh)
{
void *host_map, *guest_map;
- int fd, mapsize, to_guest[2], to_host[2];
+ int pipe_ret, fd, mapsize, to_guest[2], to_host[2];
unsigned long xfers = 0, notifies = 0, receives = 0;
unsigned int first_cpu, last_cpu;
cpu_set_t cpu_set;
@@ -161,8 +161,11 @@ static int parallel_test(u64 features,
host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
- pipe(to_guest);
- pipe(to_host);
+ pipe_ret = pipe(to_guest);
+ assert(!pipe_ret);
+
+ pipe_ret = pipe(to_host);
+ assert(!pipe_ret);
CPU_ZERO(&cpu_set);
find_cpus(&first_cpu, &last_cpu);